hip_filename (string, lengths 5–84) | hip_content (string, lengths 79–9.69M) | cuda_filename (string, lengths 4–83) | cuda_content (string, lengths 19–9.69M)
---|---|---|---|
edd5837d104a916d27ca1aa0127a1deeb3ced066.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#define N 1024
//Interleave addressing kernel_version
__global__ void interleaved_reduce(int *d_in, int *d_out)
{
//using shared memory
__shared__ int sm[N];
int i = threadIdx.x;
int id = blockIdx.x * blockDim.x + threadIdx.x;
sm[i] = d_in[id];
__syncthreads();
/*int M = N/2;
for (int s = 1; s <= N; s *= 2){
if (i < M){
printf("stride = %d, thread %d is active\n" , s, i);
d_in[(2*s)*i] = d_in[(2*s)*i] + d_in[(2*s)*i+s];
}
M = M/2;
}
if (i == 0)
d_out[0] = d_in[0];
*/
for (int s = 1; s < blockDim.x; s *= 2){
int index = 2 * s * i; // index into this block's shared-memory tile
if (index < blockDim.x){
sm[index] += sm[index+s];
}
__syncthreads();
}
if (i == 0)
d_out[blockIdx.x] = sm[0];
}
//Contiguous addressing kernel version
__global__ void contiguous_reduce(int *d_in, int *d_out)
{
/*
int i = threadIdx.x;
int M = N/2;
for (int s = M; s > 0; s /= 2){
if (i < M){
printf("stride = %d, thread %d is active\n" , s, i);
d_in[i] = d_in[i] + d_in[i+s];
}
M = M/2;
}
if (i == 0)
d_out[0] = d_in[0];
*/
//using shared memory
__shared__ int sm[N];
int i = threadIdx.x;
int id = blockIdx.x * blockDim.x + threadIdx.x;
sm[i] = d_in[id];
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2){
if (i < s){
sm[i] += sm[i+s];
}
__syncthreads();
}
if (i == 0)
d_out[blockIdx.x] = sm[0];
}
int main()
{
int h_in[N];
int h_out = 0;
//timer
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
for(int i = 0; i < N; i++)
h_in[i] = i+1;
int *d_in, *d_out;
hipMalloc((void**) &d_in, N*sizeof(int));
hipMalloc((void**) &d_out, sizeof(int));
hipMemcpy(d_in, &h_in, N*sizeof(int), hipMemcpyHostToDevice);
//kernel call
// ==================== interleaved_reduce =======================
/*
hipEventRecord(start);
interleaved_reduce<<<1, 1024>>>(d_in, d_out);
hipEventRecord(stop);
*/
// =================== contiguous_reduce =========================
hipEventRecord(start);
hipLaunchKernelGGL(( contiguous_reduce), dim3(1), dim3(1024), 0, 0, d_in, d_out);
hipEventRecord(stop);
hipMemcpy(&h_out, d_out, sizeof(int), hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float ms = 0;
hipEventElapsedTime(&ms, start, stop);
hipFree(d_in);
hipFree(d_out);
printf("Output %d\n", h_out);
printf("Time used: %f milliseconds\n", ms);
return 0;
}
|
edd5837d104a916d27ca1aa0127a1deeb3ced066.cu
|
#include <stdio.h>
#include <math.h>
#define N 1024
//Interleave addressing kernel_version
__global__ void interleaved_reduce(int *d_in, int *d_out)
{
//using shared memory
__shared__ int sm[N];
int i = threadIdx.x;
int id = blockIdx.x * blockDim.x + threadIdx.x;
sm[i] = d_in[id];
__syncthreads();
/*int M = N/2;
for (int s = 1; s <= N; s *= 2){
if (i < M){
printf("stride = %d, thread %d is active\n" , s, i);
d_in[(2*s)*i] = d_in[(2*s)*i] + d_in[(2*s)*i+s];
}
M = M/2;
}
if (i == 0)
d_out[0] = d_in[0];
*/
for (int s = 1; s < blockDim.x; s *= 2){
int index = 2 * s * i; // index into this block's shared-memory tile
if (index < blockDim.x){
sm[index] += sm[index+s];
}
__syncthreads();
}
if (i == 0)
d_out[blockIdx.x] = sm[0];
}
//Contiguous addressing kernel version
__global__ void contiguous_reduce(int *d_in, int *d_out)
{
/*
int i = threadIdx.x;
int M = N/2;
for (int s = M; s > 0; s /= 2){
if (i < M){
printf("stride = %d, thread %d is active\n" , s, i);
d_in[i] = d_in[i] + d_in[i+s];
}
M = M/2;
}
if (i == 0)
d_out[0] = d_in[0];
*/
//using shared memory
__shared__ int sm[N];
int i = threadIdx.x;
int id = blockIdx.x * blockDim.x + threadIdx.x;
sm[i] = d_in[id];
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2){
if (i < s){
sm[i] += sm[i+s];
}
__syncthreads();
}
if (i == 0)
d_out[blockIdx.x] = sm[0];
}
int main()
{
int h_in[N];
int h_out = 0;
//timer
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
for(int i = 0; i < N; i++)
h_in[i] = i+1;
int *d_in, *d_out;
cudaMalloc((void**) &d_in, N*sizeof(int));
cudaMalloc((void**) &d_out, sizeof(int));
cudaMemcpy(d_in, &h_in, N*sizeof(int), cudaMemcpyHostToDevice);
//kernel call
// ==================== interleaved_reduce =======================
/*
cudaEventRecord(start);
interleaved_reduce<<<1, 1024>>>(d_in, d_out);
cudaEventRecord(stop);
*/
// =================== contiguous_reduce =========================
cudaEventRecord(start);
contiguous_reduce<<<1, 1024>>>(d_in, d_out);
cudaEventRecord(stop);
cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float ms = 0;
cudaEventElapsedTime(&ms, start, stop);
cudaFree(d_in);
cudaFree(d_out);
printf("Output %d\n", h_out);
printf("Time used: %f milliseconds\n", ms);
return 0;
}
|
7dbe6d1b75a253ed17fb412bdc583e024c16b6ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************
* Filename: vigenere.c
* Author: programmingalgorithms.com
* Modified by: Gustavo Estrela and Bruno Sesso
* Copyright:
* Disclaimer: This code is presented "as is" without any guarantees.
* Details: Code available at
* https://www.programmingalgorithms.com/algorithm/
* vigenere-cipher?lang=C
*********************************************************************/
extern "C" {
#include "vigenere_cu.h"
}
__device__
int mod(int a, int b)
{
return (a % b + b) % b;
}
__device__
int is_alpha(char c)
{
if (c >= 'A' && c <= 'Z' ||
c >= 'a' && c <= 'z')
return 1;
else
return 0;
}
__device__
int is_upper(char c)
{
if (c <= 'Z')
return 1;
else
return 0;
}
__device__
char to_upper(char c)
{
if ('a' <= c && c <= 'z')
return 'A' + c - 'a';
else
return c;
}
__device__
char to_lower(char c)
{
if ('A' <= c && c <= 'Z')
return 'a' + c - 'A';
else
return c;
}
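// One thread per character: alphabetic characters are shifted by the key letter at
// position i mod key_len (forward when enciphering, backward when deciphering);
// non-alphabetic characters are copied through unchanged.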
__global__
void cipher(char *input, char *output, char *key, int encipher, int len,
int key_len)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= len) return;
if (is_alpha(input[i]))
{
int c_is_upper = is_upper(input[i]);
char offset = c_is_upper ? 'A' : 'a';
int key_index = i % key_len;
char k;
if (c_is_upper)
k = to_upper(key[key_index]) - offset;
else
k = to_lower(key[key_index]) - offset;
k = encipher ? k : -k;
char ch = (char)((mod(((input[i] + k) - offset), 26)) + offset);
output[i] = ch;
}
else
output[i] = input[i];
}
void cipher(char *input, char *output, char *key, int encipher)
{
char *cuda_in, *cuda_out, *cuda_key;
int key_len = strlen(key);
int len = strlen(input);
int cuda_block_size = nWarps * 32;
int cuda_blocks;
hipMalloc(&cuda_in, sizeof(char) * len);
checkCudaErr();
hipMalloc(&cuda_out, sizeof(char) * len);
checkCudaErr();
hipMalloc(&cuda_key, sizeof(char) * key_len);
checkCudaErr();
hipMemcpy(cuda_in, input, len * sizeof(char),
hipMemcpyHostToDevice);
checkCudaErr();
hipMemcpy(cuda_key, key, key_len * sizeof(char),
hipMemcpyHostToDevice);
checkCudaErr();
cuda_blocks = (len + cuda_block_size - 1) / cuda_block_size;
hipLaunchKernelGGL(( cipher), dim3(cuda_blocks), dim3(cuda_block_size), 0, 0,
cuda_in, cuda_out, cuda_key, encipher, len, key_len);
checkCudaErr();
hipMemcpy(output, cuda_out, len * sizeof(char),
hipMemcpyDeviceToHost);
checkCudaErr();
output[len] = '\0';
hipFree(cuda_in);
hipFree(cuda_out);
hipFree(cuda_key);
}
int encipher(char *input, char *output, char *key)
{
cipher(input, output, key, 1);
/*printf("|%s|\n", output);*/
/*printf("output len: %d\n", strlen(output));*/
output[strlen(output)] = '\0';
return strlen(output);
}
int decipher(char *input, char *output, char *key)
{
cipher(input, output, key, 0);
/*printf ("|%s|\n", output);*/
/*printf("output len: %d\n", strlen(output));*/
output[strlen(output)] = '\0';
return strlen(output);
}
|
7dbe6d1b75a253ed17fb412bdc583e024c16b6ae.cu
|
/*********************************************************************
* Filename: vigenere.c
* Author: programmingalgorithms.com
* Modified by: Gustavo Estrela and Bruno Sesso
* Copyright:
* Disclaimer: This code is presented "as is" without any guarantees.
* Details: Code available at
* https://www.programmingalgorithms.com/algorithm/
* vigenere-cipher?lang=C
*********************************************************************/
extern "C" {
#include "vigenere_cu.h"
}
__device__
int mod(int a, int b)
{
return (a % b + b) % b;
}
__device__
int is_alpha(char c)
{
if (c >= 'A' && c <= 'Z' ||
c >= 'a' && c <= 'z')
return 1;
else
return 0;
}
__device__
int is_upper(char c)
{
if (c <= 'Z')
return 1;
else
return 0;
}
__device__
char to_upper(char c)
{
if ('a' <= c && c <= 'z')
return 'A' + c - 'a';
else
return c;
}
__device__
char to_lower(char c)
{
if ('A' <= c && c <= 'Z')
return 'a' + c - 'A';
else
return c;
}
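// One thread per character: alphabetic characters are shifted by the key letter at
// position i mod key_len (forward when enciphering, backward when deciphering);
// non-alphabetic characters are copied through unchanged.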
__global__
void cipher(char *input, char *output, char *key, int encipher, int len,
int key_len)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= len) return;
if (is_alpha(input[i]))
{
int c_is_upper = is_upper(input[i]);
char offset = c_is_upper ? 'A' : 'a';
int key_index = i % key_len;
char k;
if (c_is_upper)
k = to_upper(key[key_index]) - offset;
else
k = to_lower(key[key_index]) - offset;
k = encipher ? k : -k;
char ch = (char)((mod(((input[i] + k) - offset), 26)) + offset);
output[i] = ch;
}
else
output[i] = input[i];
}
void cipher(char *input, char *output, char *key, int encipher)
{
char *cuda_in, *cuda_out, *cuda_key;
int key_len = strlen(key);
int len = strlen(input);
int cuda_block_size = nWarps * 32;
int cuda_blocks;
cudaMalloc(&cuda_in, sizeof(char) * len);
checkCudaErr();
cudaMalloc(&cuda_out, sizeof(char) * len);
checkCudaErr();
cudaMalloc(&cuda_key, sizeof(char) * key_len);
checkCudaErr();
cudaMemcpy(cuda_in, input, len * sizeof(char),
cudaMemcpyHostToDevice);
checkCudaErr();
cudaMemcpy(cuda_key, key, key_len * sizeof(char),
cudaMemcpyHostToDevice);
checkCudaErr();
cuda_blocks = (len + cuda_block_size - 1) / cuda_block_size;
cipher<<<cuda_blocks, cuda_block_size>>>
(cuda_in, cuda_out, cuda_key, encipher, len, key_len);
checkCudaErr();
cudaMemcpy(output, cuda_out, len * sizeof(char),
cudaMemcpyDeviceToHost);
checkCudaErr();
output[len] = '\0';
cudaFree(cuda_in);
cudaFree(cuda_out);
cudaFree(cuda_key);
}
int encipher(char *input, char *output, char *key)
{
cipher(input, output, key, 1);
/*printf("|%s|\n", output);*/
/*printf("output len: %d\n", strlen(output));*/
output[strlen(output)] = '\0';
return strlen(output);
}
int decipher(char *input, char *output, char *key)
{
cipher(input, output, key, 0);
/*printf ("|%s|\n", output);*/
/*printf("output len: %d\n", strlen(output));*/
output[strlen(output)] = '\0';
return strlen(output);
}
|
b190205816c1d45e045e74a452fed973289c17dd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cublas_manager.h"
#include "../../cuda_err_check.h"
cublas_manager::cublas_manager() :
n_(-1),
num_batches_(-1),
factored_(false)
{
hipblasCreate(&cublas_handle_);
}
cublas_manager::~cublas_manager()
{
if(factored_) {
FreeDeviceMemory();
hipblasDestroy(cublas_handle_);
}
}
void cublas_manager::setup_memory()
{
if(factored_) {
FreeDeviceMemory();
}
AllocateDeviceMemory();
}
void cublas_manager::AllocateDeviceMemory()
{
hipDeviceSynchronize();
cuda_err_check(hipGetLastError());
cuda_err_check(hipMalloc((void**)&matrix_inverse_dev_,sizeof(double)*(n_*n_*num_batches_)));
cuda_err_check(hipMalloc((void**)&matrix_inverse_pointers_dev_,sizeof(double*)*num_batches_));
cuda_err_check(hipMalloc((void**)&matrix_pointers_dev_,sizeof(double*)*num_batches_));
cuda_err_check(hipMalloc((void**)&info_dev_,sizeof(int)*num_batches_));
cuda_err_check(hipMalloc((void**)&tmp_dev_,sizeof(double)*num_batches_*n_));
cuda_err_check(hipMalloc((void**)&tmp_pointers_dev_,sizeof(double*)*num_batches_));
data_ptrs_.resize(num_batches_);
tmp_ptrs_.resize(num_batches_);
for(int j = 0; j < num_batches_; ++j) {
data_ptrs_[j] = matrix_inverse_dev_ + j*n_*n_;
}
hipMemcpy(matrix_inverse_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, hipMemcpyHostToDevice);
cuda_err_check(hipGetLastError());
for(int j = 0; j < num_batches_; ++j) {
tmp_ptrs_[j] = tmp_dev_ + j*n_;
}
hipMemcpy(tmp_pointers_dev_, tmp_ptrs_.data(), sizeof(double*)*num_batches_, hipMemcpyHostToDevice);
cuda_err_check(hipGetLastError());
}
void cublas_manager::FreeDeviceMemory()
{
hipFree(matrix_inverse_dev_);
hipFree(matrix_inverse_pointers_dev_);
hipFree(matrix_pointers_dev_);
hipFree(info_dev_);
hipFree(tmp_dev_);
hipFree(tmp_pointers_dev_);
}
int cublas_manager::factor_invert(int num_batches, int n, double* values) {
if(n != n_ || num_batches != num_batches_) {
n_ = n;
num_batches_ = num_batches;
setup_memory();
}
if(values == NULL) {
return 1;
}
bool need_tx = false;
for(int j = 0; j < num_batches_; ++j) {
if(data_ptrs_[j] != values + j*n_*n_) {
data_ptrs_[j] = values + j*n_*n_;
need_tx = true;
}
}
if(need_tx) {
hipMemcpy(matrix_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, hipMemcpyHostToDevice);
}
int lda = n_;
int* ipiv = NULL; //Turns off pivoting
hipblasDgetrfBatched(cublas_handle_, n_,
matrix_pointers_dev_, lda,
ipiv, info_dev_, num_batches_);
int ldc = n_;
const double** const_matrix_pointers_dev = (const double**) matrix_pointers_dev_;
hipblasDgetriBatched(cublas_handle_, n_, const_matrix_pointers_dev,
lda, ipiv, matrix_inverse_pointers_dev_,
ldc, info_dev_, num_batches_);
int ierr = 0;
#ifdef ZERORK_FULL_DEBUG
info_.resize(num_batches_);
cuda_err_check(hipMemcpy(info_.data(), info_dev_, num_batches_*sizeof(int), hipMemcpyDeviceToHost));
//Check for errors
// factor_error > 0, singular matrix, zero diagonal at row,col = factor_error
// factor_error = 0, success
// factor_error < 0, illegal input
for(int i=0; i < num_batches_; ++i) {
if(info_[i]!=0) {
ierr = info_[i];
break;
}
}
#endif
factored_ = true;
return ierr;
}
int cublas_manager::factor_lu(int num_batches, int n, double* values) {
if(n != n_ || num_batches != num_batches_) {
n_ = n;
num_batches_ = num_batches;
setup_memory();
}
if(values == NULL) {
return 1;
}
bool need_tx = false;
for(int j = 0; j < num_batches_; ++j) {
if(data_ptrs_[j] != values + j*n_*n_) {
data_ptrs_[j] = values + j*n_*n_;
need_tx = true;
}
}
if(need_tx) {
hipMemcpy(matrix_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, hipMemcpyHostToDevice);
}
int lda = n_;
int* ipiv = NULL; //Turns off pivoting
hipblasDgetrfBatched(cublas_handle_, n_,
matrix_pointers_dev_, lda,
ipiv, info_dev_, num_batches_);
int ierr = 0;
#ifdef ZERORK_FULL_DEBUG
info_.resize(num_batches_);
cuda_err_check(hipMemcpy(info_.data(), info_dev_, num_batches_*sizeof(int), hipMemcpyDeviceToHost));
//Check for errors
// factor_error > 0, singular matrix, zero diagonal at row,col = factor_error
// factor_error = 0, success
// factor_error < 0, illegal input
for(int i=0; i < num_batches_; ++i) {
if(info_[i]!=0) {
ierr = info_[i];
break;
}
}
#endif
factored_ = true;
return ierr;
}
//The following modified from cuda sdk-5.0
#define TRANSPOSE_TILE_DIM 32
#define TRANSPOSE_BLOCK_ROWS 8
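// Tiled transpose: each block stages a TRANSPOSE_TILE_DIM x TRANSPOSE_TILE_DIM tile in
// shared memory (padded by one column to avoid bank conflicts) so that both the reads
// from idata and the writes to odata remain coalesced.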
static __global__ void CUBLAS_MANAGER_TransposeNoBankConflicts(double *odata, const double *idata, const int width, const int height)
{
__shared__ double tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];
int xIndex,yIndex,index_in,index_out;
xIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.y;
index_in = xIndex + (yIndex)*width;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(xIndex < width && yIndex+i < height){
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];}
}
__syncthreads();
xIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.y;
index_out = xIndex + (yIndex)*height;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(yIndex+i < width && xIndex < height){
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];}
}
}
void cublas_manager::cuda_transpose(double* odata, const double* idata, const int width, const int height)
{
// Put df/dy in "normal" order
dim3 nBlocks2D,nThreads2D;
nThreads2D.x = TRANSPOSE_TILE_DIM;
nThreads2D.y = TRANSPOSE_BLOCK_ROWS;
nBlocks2D.x = (width+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
nBlocks2D.y = (height+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
hipLaunchKernelGGL(( CUBLAS_MANAGER_TransposeNoBankConflicts), dim3(nBlocks2D),dim3(nThreads2D), 0, 0, odata,idata,width,height);
#ifdef ZERORK_FULL_DEBUG
cuda_err_check( hipPeekAtLastError() );
cuda_err_check( hipDeviceSynchronize() );
#endif
}
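// Block-diagonal matrix-vector product: each thread computes one row of Y_dev for one
// batch entry, multiplying that entry's mtx_block_size x mtx_block_size column-major
// block of A_dev by the matching segment of X_dev (grid-stride loop over all rows).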
static void __global__ CUBLAS_MANAGER_cuda_bdmv_kernel
(
const int mtx_block_size,
const int num_mtx_blocks,
const double* A_dev,
const double* X_dev ,
double * Y_dev
)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int stride = gridDim.x*blockDim.x;
for( ; tidx < num_mtx_blocks*mtx_block_size; tidx += stride)
{
int local_row = tidx % mtx_block_size;
int local_block = tidx / mtx_block_size;
double Y_dev_accum = 0.0;
for(int i = 0; i < mtx_block_size; ++i) //columns
{
int data_idx = mtx_block_size*mtx_block_size*local_block + mtx_block_size*i + local_row;
Y_dev_accum += A_dev[data_idx]*X_dev[i+local_block*mtx_block_size];
}
Y_dev[local_row+local_block*mtx_block_size] = Y_dev_accum;
}
}
int cublas_manager::cuda_bdmv(int n, int nbatch, double* A_dev, double* B_dev, double* Y_dev)
{
int threads = ::min(n*nbatch,1024);
int blocks=(nbatch*n+threads-1)/threads;
hipLaunchKernelGGL(( CUBLAS_MANAGER_cuda_bdmv_kernel), dim3(blocks),dim3(threads), 0, 0, n, nbatch, A_dev, B_dev, Y_dev);
#ifdef ZERORK_FULL_DEBUG
cuda_err_check(hipPeekAtLastError());
cuda_err_check(hipDeviceSynchronize());
#endif
return 0;
}
int cublas_manager::solve_invert(int num_batches, int n, const double* rhs, double* soln) {
if(n != n_ || num_batches != num_batches_) {
return 1;
}
// Transpose rhs into soln
cuda_transpose(soln,rhs,num_batches_,n_);
// Block-diagonal matrix vector multiplication
cuda_bdmv(n_, num_batches_, matrix_inverse_dev_, soln, tmp_dev_);
// Put tmp back into block order
cuda_transpose(soln,tmp_dev_,n_,num_batches_);
return(0);
}
int cublas_manager::solve_lu(int num_batches, int n, const double* rhs, double* soln) {
if(n != n_ || num_batches != num_batches_) {
return 1;
}
// Transpose rhs into tmp_dev_
cuda_transpose(tmp_dev_,rhs,num_batches_,n_);
// CUBLAS forward and back substitution
int* ipiv = NULL; //Turns off pivoting
int lda = n_;
int ldb = n_;
int info = 0;
hipblasDgetrsBatched(cublas_handle_, HIPBLAS_OP_N, n_, 1,
matrix_pointers_dev_, lda,
ipiv, tmp_pointers_dev_, ldb, &info, num_batches_);
// Put tmp back into block order
cuda_transpose(soln,tmp_dev_,n_,num_batches_);
return(0);
}
|
b190205816c1d45e045e74a452fed973289c17dd.cu
|
#include "cublas_manager.h"
#include "../../cuda_err_check.h"
cublas_manager::cublas_manager() :
n_(-1),
num_batches_(-1),
factored_(false)
{
cublasCreate(&cublas_handle_);
}
cublas_manager::~cublas_manager()
{
if(factored_) {
FreeDeviceMemory();
cublasDestroy(cublas_handle_);
}
}
void cublas_manager::setup_memory()
{
if(factored_) {
FreeDeviceMemory();
}
AllocateDeviceMemory();
}
void cublas_manager::AllocateDeviceMemory()
{
cudaDeviceSynchronize();
cuda_err_check(cudaGetLastError());
cuda_err_check(cudaMalloc((void**)&matrix_inverse_dev_,sizeof(double)*(n_*n_*num_batches_)));
cuda_err_check(cudaMalloc((void**)&matrix_inverse_pointers_dev_,sizeof(double*)*num_batches_));
cuda_err_check(cudaMalloc((void**)&matrix_pointers_dev_,sizeof(double*)*num_batches_));
cuda_err_check(cudaMalloc((void**)&info_dev_,sizeof(int)*num_batches_));
cuda_err_check(cudaMalloc((void**)&tmp_dev_,sizeof(double)*num_batches_*n_));
cuda_err_check(cudaMalloc((void**)&tmp_pointers_dev_,sizeof(double*)*num_batches_));
data_ptrs_.resize(num_batches_);
tmp_ptrs_.resize(num_batches_);
for(int j = 0; j < num_batches_; ++j) {
data_ptrs_[j] = matrix_inverse_dev_ + j*n_*n_;
}
cudaMemcpy(matrix_inverse_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, cudaMemcpyHostToDevice);
cuda_err_check(cudaGetLastError());
for(int j = 0; j < num_batches_; ++j) {
tmp_ptrs_[j] = tmp_dev_ + j*n_;
}
cudaMemcpy(tmp_pointers_dev_, tmp_ptrs_.data(), sizeof(double*)*num_batches_, cudaMemcpyHostToDevice);
cuda_err_check(cudaGetLastError());
}
void cublas_manager::FreeDeviceMemory()
{
cudaFree(matrix_inverse_dev_);
cudaFree(matrix_inverse_pointers_dev_);
cudaFree(matrix_pointers_dev_);
cudaFree(info_dev_);
cudaFree(tmp_dev_);
cudaFree(tmp_pointers_dev_);
}
int cublas_manager::factor_invert(int num_batches, int n, double* values) {
if(n != n_ || num_batches != num_batches_) {
n_ = n;
num_batches_ = num_batches;
setup_memory();
}
if(values == NULL) {
return 1;
}
bool need_tx = false;
for(int j = 0; j < num_batches_; ++j) {
if(data_ptrs_[j] != values + j*n_*n_) {
data_ptrs_[j] = values + j*n_*n_;
need_tx = true;
}
}
if(need_tx) {
cudaMemcpy(matrix_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, cudaMemcpyHostToDevice);
}
int lda = n_;
int* ipiv = NULL; //Turns off pivoting
cublasDgetrfBatched(cublas_handle_, n_,
matrix_pointers_dev_, lda,
ipiv, info_dev_, num_batches_);
int ldc = n_;
const double** const_matrix_pointers_dev = (const double**) matrix_pointers_dev_;
cublasDgetriBatched(cublas_handle_, n_, const_matrix_pointers_dev,
lda, ipiv, matrix_inverse_pointers_dev_,
ldc, info_dev_, num_batches_);
int ierr = 0;
#ifdef ZERORK_FULL_DEBUG
info_.resize(num_batches_);
cuda_err_check(cudaMemcpy(info_.data(), info_dev_, num_batches_*sizeof(int), cudaMemcpyDeviceToHost));
//Check for errors
// factor_error > 0, singular matrix, zero diagonal at row,col = factor_error
// factor_error = 0, success
// factor_error < 0, illegal input
for(int i=0; i < num_batches_; ++i) {
if(info_[i]!=0) {
ierr = info_[i];
break;
}
}
#endif
factored_ = true;
return ierr;
}
int cublas_manager::factor_lu(int num_batches, int n, double* values) {
if(n != n_ || num_batches != num_batches_) {
n_ = n;
num_batches_ = num_batches;
setup_memory();
}
if(values == NULL) {
return 1;
}
bool need_tx = false;
for(int j = 0; j < num_batches_; ++j) {
if(data_ptrs_[j] != values + j*n_*n_) {
data_ptrs_[j] = values + j*n_*n_;
need_tx = true;
}
}
if(need_tx) {
cudaMemcpy(matrix_pointers_dev_, data_ptrs_.data(), sizeof(double*)*num_batches_, cudaMemcpyHostToDevice);
}
int lda = n_;
int* ipiv = NULL; //Turns off pivoting
cublasDgetrfBatched(cublas_handle_, n_,
matrix_pointers_dev_, lda,
ipiv, info_dev_, num_batches_);
int ierr = 0;
#ifdef ZERORK_FULL_DEBUG
info_.resize(num_batches_);
cuda_err_check(cudaMemcpy(info_.data(), info_dev_, num_batches_*sizeof(int), cudaMemcpyDeviceToHost));
//Check for errors
// factor_error > 0, singular matrix, zero diagonal at row,col = factor_error
// factor_error = 0, success
// factor_error < 0, illegal input
for(int i=0; i < num_batches_; ++i) {
if(info_[i]!=0) {
ierr = info_[i];
break;
}
}
#endif
factored_ = true;
return ierr;
}
//The following modified from cuda sdk-5.0
#define TRANSPOSE_TILE_DIM 32
#define TRANSPOSE_BLOCK_ROWS 8
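// Tiled transpose: each block stages a TRANSPOSE_TILE_DIM x TRANSPOSE_TILE_DIM tile in
// shared memory (padded by one column to avoid bank conflicts) so that both the reads
// from idata and the writes to odata remain coalesced.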
static __global__ void CUBLAS_MANAGER_TransposeNoBankConflicts(double *odata, const double *idata, const int width, const int height)
{
__shared__ double tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];
int xIndex,yIndex,index_in,index_out;
xIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.y;
index_in = xIndex + (yIndex)*width;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(xIndex < width && yIndex+i < height){
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];}
}
__syncthreads();
xIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.y;
index_out = xIndex + (yIndex)*height;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(yIndex+i < width && xIndex < height){
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];}
}
}
void cublas_manager::cuda_transpose(double* odata, const double* idata, const int width, const int height)
{
// Put df/dy in "normal" order
dim3 nBlocks2D,nThreads2D;
nThreads2D.x = TRANSPOSE_TILE_DIM;
nThreads2D.y = TRANSPOSE_BLOCK_ROWS;
nBlocks2D.x = (width+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
nBlocks2D.y = (height+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
CUBLAS_MANAGER_TransposeNoBankConflicts<<<nBlocks2D,nThreads2D>>>(odata,idata,width,height);
#ifdef ZERORK_FULL_DEBUG
cuda_err_check( cudaPeekAtLastError() );
cuda_err_check( cudaDeviceSynchronize() );
#endif
}
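// Block-diagonal matrix-vector product: each thread computes one row of Y_dev for one
// batch entry, multiplying that entry's mtx_block_size x mtx_block_size column-major
// block of A_dev by the matching segment of X_dev (grid-stride loop over all rows).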
static void __global__ CUBLAS_MANAGER_cuda_bdmv_kernel
(
const int mtx_block_size,
const int num_mtx_blocks,
const double* A_dev,
const double* X_dev ,
double * Y_dev
)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int stride = gridDim.x*blockDim.x;
for( ; tidx < num_mtx_blocks*mtx_block_size; tidx += stride)
{
int local_row = tidx % mtx_block_size;
int local_block = tidx / mtx_block_size;
double Y_dev_accum = 0.0;
for(int i = 0; i < mtx_block_size; ++i) //columns
{
int data_idx = mtx_block_size*mtx_block_size*local_block + mtx_block_size*i + local_row;
Y_dev_accum += A_dev[data_idx]*X_dev[i+local_block*mtx_block_size];
}
Y_dev[local_row+local_block*mtx_block_size] = Y_dev_accum;
}
}
int cublas_manager::cuda_bdmv(int n, int nbatch, double* A_dev, double* B_dev, double* Y_dev)
{
int threads = std::min(n*nbatch,1024);
int blocks=(nbatch*n+threads-1)/threads;
CUBLAS_MANAGER_cuda_bdmv_kernel<<<blocks,threads>>>(n, nbatch, A_dev, B_dev, Y_dev);
#ifdef ZERORK_FULL_DEBUG
cuda_err_check(cudaPeekAtLastError());
cuda_err_check(cudaDeviceSynchronize());
#endif
return 0;
}
int cublas_manager::solve_invert(int num_batches, int n, const double* rhs, double* soln) {
if(n != n_ || num_batches != num_batches_) {
return 1;
}
// Transpose rhs into soln
cuda_transpose(soln,rhs,num_batches_,n_);
// Block-diagonal matrix vector multiplication
cuda_bdmv(n_, num_batches_, matrix_inverse_dev_, soln, tmp_dev_);
// Put tmp back into block order
cuda_transpose(soln,tmp_dev_,n_,num_batches_);
return(0);
}
int cublas_manager::solve_lu(int num_batches, int n, const double* rhs, double* soln) {
if(n != n_ || num_batches != num_batches_) {
return 1;
}
// Transpose rhs into tmp_dev_
cuda_transpose(tmp_dev_,rhs,num_batches_,n_);
// CUBLAS forward and back substitution
int* ipiv = NULL; //Turns off pivoting
int lda = n_;
int ldb = n_;
int info = 0;
cublasDgetrsBatched(cublas_handle_, CUBLAS_OP_N, n_, 1,
matrix_pointers_dev_, lda,
ipiv, tmp_pointers_dev_, ldb, &info, num_batches_);
// Put tmp back into block order
cuda_transpose(soln,tmp_dev_,n_,num_batches_);
return(0);
}
|
4ce4ac155df3ef4b41c78b1c63da13befd7a71bc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#define STOP 0
#define START 1
/* Play with the following two values */
#define NB 1000000L //Size of array (long integer)
#define MANY 200L //Number of transfers
/* (over-)Simple chronometer function */
void chrono (int kind, float *time) {
static clock_t counts;
if (kind == START) {
*time = 0.0;
counts = clock();
return;
}
if (kind == STOP) {
*time = ((float)(clock()-counts))/((float)CLOCKS_PER_SEC);
}
}
int main () {
float *ptr, *gpu_ptr;
hipError_t err;
float time, number_of_Gbytes;
long i, j;
/* Dynamic allocations below */
// Allocate ptr on host below
ptr = (float *)malloc(NB * sizeof(float));
// Allocate gpu_ptr on device below
err = hipMalloc(&gpu_ptr,sizeof(float)*NB);
/* Some error handling */
if (ptr == NULL) {
printf ("Not enough memory on host\n");
exit (1);
}
if (err != 0) {
printf ("Pb allocating memory on device. Reason:\n");
printf ("%s\n", hipGetErrorString (err));
exit (1);
}
/* Are the following two lines necessary ? */
for (i = 0; i < NB; i++)
ptr[i] = 9.0;
/* Transfer loop below */
chrono (START, &time);
for (j = 0; j < MANY; j++) {
// perform memory transfer here
hipMemcpy(gpu_ptr,ptr,sizeof(float)*NB,hipMemcpyHostToDevice);
}
chrono (STOP, &time);
/* Output results */
number_of_Gbytes = (MANY*(float)sizeof(float)*NB/1024./1024./1024.);
printf ("%f Gbytes transfered in %f seconds\n", number_of_Gbytes, time);
printf ("Bandwidth = %f Gb/s\n", number_of_Gbytes / time);
/* Clean the place */
hipFree (gpu_ptr);
free (ptr);
return 0;
}
|
4ce4ac155df3ef4b41c78b1c63da13befd7a71bc.cu
|
#include <cuda.h>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#define STOP 0
#define START 1
/* Play with the following two values */
#define NB 1000000L //Size of array (long integer)
#define MANY 200L //Number of transfers
/* (over-)Simple chronometer function */
void chrono (int kind, float *time) {
static clock_t counts;
if (kind == START) {
*time = 0.0;
counts = clock();
return;
}
if (kind == STOP) {
*time = ((float)(clock()-counts))/((float)CLOCKS_PER_SEC);
}
}
int main () {
float *ptr, *gpu_ptr;
cudaError_t err;
float time, number_of_Gbytes;
long i, j;
/* Dynamic allocations below */
// Allocate ptr on host below
ptr = (float *)malloc(NB * sizeof(float));
// Allocate gpu_ptr on device below
err = cudaMalloc(&gpu_ptr,sizeof(float)*NB);
/* Some error handling */
if (ptr == NULL) {
printf ("Not enough memory on host\n");
exit (1);
}
if (err != 0) {
printf ("Pb allocating memory on device. Reason:\n");
printf ("%s\n", cudaGetErrorString (err));
exit (1);
}
/* Are the following two lines necessary ? */
for (i = 0; i < NB; i++)
ptr[i] = 9.0;
/* Transfer loop below */
chrono (START, &time);
for (j = 0; j < MANY; j++) {
// perform memory transfer here
cudaMemcpy(gpu_ptr,ptr,sizeof(float)*NB,cudaMemcpyHostToDevice);
}
chrono (STOP, &time);
/* Output results */
number_of_Gbytes = (MANY*(float)sizeof(float)*NB/1024./1024./1024.);
printf ("%f Gbytes transfered in %f seconds\n", number_of_Gbytes, time);
printf ("Bandwidth = %f Gb/s\n", number_of_Gbytes / time);
/* Clean the place */
cudaFree (gpu_ptr);
free (ptr);
return 0;
}
|
68ae5ec836698958b926298288cbf571a8e8ab43.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/utilities/error.hpp>
#include <strings/utilities.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/for_each.h>
#include <thrust/transform_reduce.h>
// clang-format off
namespace cudf {
// Create a strings-type column from vector of pointer/size pairs
std::unique_ptr<column> make_strings_column(
const rmm::device_vector<thrust::pair<const char*, size_type>>& strings,
hipStream_t stream,
rmm::mr::device_memory_resource* mr) {
CUDF_FUNC_RANGE();
size_type strings_count = strings.size();
if (strings_count == 0) return strings::detail::make_empty_strings_column(mr, stream);
auto execpol = rmm::exec_policy(stream);
auto d_strings = strings.data().get();
// check total size is not too large for cudf column
size_t bytes = thrust::transform_reduce(
execpol->on(stream),
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(strings_count),
[d_strings] __device__(size_t idx) {
auto item = d_strings[idx];
return (item.first != nullptr) ? item.second : 0;
},
0,
thrust::plus<size_t>());
CUDF_EXPECTS(bytes < std::numeric_limits<size_type>::max(),
"total size of strings is too large for cudf column");
// build offsets column from the strings sizes
auto offsets_transformer = [d_strings] __device__(size_type idx) {
thrust::pair<const char*, size_type> item = d_strings[idx];
return (item.first != nullptr ? static_cast<int32_t>(item.second) : 0);
};
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), offsets_transformer);
auto offsets_column = strings::detail::make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// create null mask
auto new_nulls = detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
[d_strings] __device__(size_type idx) { return d_strings[idx].first != nullptr; },
stream,
mr);
auto null_count = new_nulls.second;
rmm::device_buffer null_mask{0, stream, mr};
if (null_count > 0) null_mask = std::move(new_nulls.first);
// build chars column
auto chars_column =
strings::detail::create_chars_child_column(strings_count, null_count, bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_offsets, d_chars] __device__(size_type idx) {
// place individual strings
auto item = d_strings[idx];
if (item.first != nullptr)
memcpy(d_chars + d_offsets[idx], item.first, item.second);
});
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
struct string_view_to_pair {
string_view null_placeholder;
string_view_to_pair(string_view n) : null_placeholder(n) {}
__device__ thrust::pair<const char*, size_type> operator()(const string_view& i) {
return (i.data() == null_placeholder.data())
? thrust::pair<const char*, size_type>{nullptr, 0}
: thrust::pair<const char*, size_type>{i.data(), i.size_bytes()};
}
};
// Create a strings-type column from vector of string_view
std::unique_ptr<column> make_strings_column(const rmm::device_vector<string_view>& string_views,
const string_view null_placeholder,
hipStream_t stream,
rmm::mr::device_memory_resource* mr) {
auto it_pair =
thrust::make_transform_iterator(string_views.begin(), string_view_to_pair{null_placeholder});
const rmm::device_vector<thrust::pair<const char*, size_type>> dev_strings(
it_pair, it_pair + string_views.size());
return make_strings_column(dev_strings, stream, mr);
}
// Create a strings-type column from device vector of chars and vector of offsets.
std::unique_ptr<column> make_strings_column(const rmm::device_vector<char>& strings,
const rmm::device_vector<size_type>& offsets,
const rmm::device_vector<bitmask_type>& valid_mask,
size_type null_count,
hipStream_t stream,
rmm::mr::device_memory_resource* mr) {
CUDF_FUNC_RANGE();
size_type num_strings = offsets.size() - 1;
if (num_strings == 0) return strings::detail::make_empty_strings_column(mr, stream);
CUDF_EXPECTS(null_count < num_strings, "null strings column not yet supported");
if (null_count > 0) {
CUDF_EXPECTS(!valid_mask.empty(), "Cannot have null elements without a null mask.");
}
auto execpol = rmm::exec_policy(stream);
size_type bytes = offsets.back();
CUDF_EXPECTS(bytes >= 0, "invalid offsets vector");
// build offsets column -- this is the number of strings + 1
auto offsets_column =
make_numeric_column(data_type{type_id::INT32}, num_strings + 1, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
CUDA_TRY(hipMemcpyAsync(offsets_view.data<int32_t>(),
offsets.data().get(),
(num_strings + 1) * sizeof(int32_t),
hipMemcpyDeviceToDevice,
stream));
// build null bitmask
rmm::device_buffer null_mask{
valid_mask.data().get(),
valid_mask.size() *
sizeof(
bitmask_type)}; // Or this works too: sizeof(typename std::remove_reference_t<decltype(valid_mask)>::value_type)
// Following give the incorrect value of 8 instead of 4 because of smart references:
// sizeof(valid_mask[0]), sizeof(decltype(valid_mask.front()))
// build chars column
auto chars_column =
strings::detail::create_chars_child_column(num_strings, null_count, bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
CUDA_TRY(hipMemcpyAsync(
chars_view.data<char>(), strings.data().get(), bytes, hipMemcpyDeviceToDevice, stream));
return make_strings_column(num_strings,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
// Create strings column from host vectors
std::unique_ptr<column> make_strings_column(const std::vector<char>& strings,
const std::vector<size_type>& offsets,
const std::vector<bitmask_type>& null_mask,
size_type null_count,
hipStream_t stream,
rmm::mr::device_memory_resource* mr) {
rmm::device_vector<char> d_strings{strings};
rmm::device_vector<size_type> d_offsets{offsets};
rmm::device_vector<bitmask_type> d_null_mask{null_mask};
return make_strings_column(d_strings, d_offsets, d_null_mask, null_count, stream, mr);
}
//
std::unique_ptr<column> make_strings_column(size_type num_strings,
std::unique_ptr<column> offsets_column,
std::unique_ptr<column> chars_column,
size_type null_count,
rmm::device_buffer&& null_mask,
hipStream_t stream,
rmm::mr::device_memory_resource* mr) {
if (null_count > 0) CUDF_EXPECTS(null_mask.size() > 0, "Column with nulls must be nullable.");
CUDF_EXPECTS(num_strings == offsets_column->size() - 1,
"Invalid offsets column size for strings column.");
CUDF_EXPECTS(offsets_column->null_count() == 0, "Offsets column should not contain nulls");
CUDF_EXPECTS(chars_column->null_count() == 0, "Chars column should not contain nulls");
std::vector<std::unique_ptr<column>> children;
children.emplace_back(std::move(offsets_column));
children.emplace_back(std::move(chars_column));
return std::make_unique<column>(data_type{type_id::STRING},
num_strings,
rmm::device_buffer{0, stream, mr},
null_mask,
null_count,
std::move(children));
}
} // namespace cudf
// clang-format on TODO fix
|
68ae5ec836698958b926298288cbf571a8e8ab43.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/utilities/error.hpp>
#include <strings/utilities.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/for_each.h>
#include <thrust/transform_reduce.h>
// clang-format off
namespace cudf {
// Create a strings-type column from vector of pointer/size pairs
std::unique_ptr<column> make_strings_column(
const rmm::device_vector<thrust::pair<const char*, size_type>>& strings,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr) {
CUDF_FUNC_RANGE();
size_type strings_count = strings.size();
if (strings_count == 0) return strings::detail::make_empty_strings_column(mr, stream);
auto execpol = rmm::exec_policy(stream);
auto d_strings = strings.data().get();
// check total size is not too large for cudf column
size_t bytes = thrust::transform_reduce(
execpol->on(stream),
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(strings_count),
[d_strings] __device__(size_t idx) {
auto item = d_strings[idx];
return (item.first != nullptr) ? item.second : 0;
},
0,
thrust::plus<size_t>());
CUDF_EXPECTS(bytes < std::numeric_limits<size_type>::max(),
"total size of strings is too large for cudf column");
// build offsets column from the strings sizes
auto offsets_transformer = [d_strings] __device__(size_type idx) {
thrust::pair<const char*, size_type> item = d_strings[idx];
return (item.first != nullptr ? static_cast<int32_t>(item.second) : 0);
};
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), offsets_transformer);
auto offsets_column = strings::detail::make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// create null mask
auto new_nulls = detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
[d_strings] __device__(size_type idx) { return d_strings[idx].first != nullptr; },
stream,
mr);
auto null_count = new_nulls.second;
rmm::device_buffer null_mask{0, stream, mr};
if (null_count > 0) null_mask = std::move(new_nulls.first);
// build chars column
auto chars_column =
strings::detail::create_chars_child_column(strings_count, null_count, bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_offsets, d_chars] __device__(size_type idx) {
// place individual strings
auto item = d_strings[idx];
if (item.first != nullptr)
memcpy(d_chars + d_offsets[idx], item.first, item.second);
});
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
struct string_view_to_pair {
string_view null_placeholder;
string_view_to_pair(string_view n) : null_placeholder(n) {}
__device__ thrust::pair<const char*, size_type> operator()(const string_view& i) {
return (i.data() == null_placeholder.data())
? thrust::pair<const char*, size_type>{nullptr, 0}
: thrust::pair<const char*, size_type>{i.data(), i.size_bytes()};
}
};
// Create a strings-type column from vector of string_view
std::unique_ptr<column> make_strings_column(const rmm::device_vector<string_view>& string_views,
const string_view null_placeholder,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr) {
auto it_pair =
thrust::make_transform_iterator(string_views.begin(), string_view_to_pair{null_placeholder});
const rmm::device_vector<thrust::pair<const char*, size_type>> dev_strings(
it_pair, it_pair + string_views.size());
return make_strings_column(dev_strings, stream, mr);
}
// Create a strings-type column from device vector of chars and vector of offsets.
std::unique_ptr<column> make_strings_column(const rmm::device_vector<char>& strings,
const rmm::device_vector<size_type>& offsets,
const rmm::device_vector<bitmask_type>& valid_mask,
size_type null_count,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr) {
CUDF_FUNC_RANGE();
size_type num_strings = offsets.size() - 1;
if (num_strings == 0) return strings::detail::make_empty_strings_column(mr, stream);
CUDF_EXPECTS(null_count < num_strings, "null strings column not yet supported");
if (null_count > 0) {
CUDF_EXPECTS(!valid_mask.empty(), "Cannot have null elements without a null mask.");
}
auto execpol = rmm::exec_policy(stream);
size_type bytes = offsets.back();
CUDF_EXPECTS(bytes >= 0, "invalid offsets vector");
// build offsets column -- this is the number of strings + 1
auto offsets_column =
make_numeric_column(data_type{type_id::INT32}, num_strings + 1, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
CUDA_TRY(cudaMemcpyAsync(offsets_view.data<int32_t>(),
offsets.data().get(),
(num_strings + 1) * sizeof(int32_t),
cudaMemcpyDeviceToDevice,
stream));
// build null bitmask
rmm::device_buffer null_mask{
valid_mask.data().get(),
valid_mask.size() *
sizeof(
bitmask_type)}; // Or this works too: sizeof(typename std::remove_reference_t<decltype(valid_mask)>::value_type)
// Following give the incorrect value of 8 instead of 4 because of smart references:
// sizeof(valid_mask[0]), sizeof(decltype(valid_mask.front()))
// build chars column
auto chars_column =
strings::detail::create_chars_child_column(num_strings, null_count, bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
CUDA_TRY(cudaMemcpyAsync(
chars_view.data<char>(), strings.data().get(), bytes, cudaMemcpyDeviceToDevice, stream));
return make_strings_column(num_strings,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
// Create strings column from host vectors
std::unique_ptr<column> make_strings_column(const std::vector<char>& strings,
const std::vector<size_type>& offsets,
const std::vector<bitmask_type>& null_mask,
size_type null_count,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr) {
rmm::device_vector<char> d_strings{strings};
rmm::device_vector<size_type> d_offsets{offsets};
rmm::device_vector<bitmask_type> d_null_mask{null_mask};
return make_strings_column(d_strings, d_offsets, d_null_mask, null_count, stream, mr);
}
//
std::unique_ptr<column> make_strings_column(size_type num_strings,
std::unique_ptr<column> offsets_column,
std::unique_ptr<column> chars_column,
size_type null_count,
rmm::device_buffer&& null_mask,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr) {
if (null_count > 0) CUDF_EXPECTS(null_mask.size() > 0, "Column with nulls must be nullable.");
CUDF_EXPECTS(num_strings == offsets_column->size() - 1,
"Invalid offsets column size for strings column.");
CUDF_EXPECTS(offsets_column->null_count() == 0, "Offsets column should not contain nulls");
CUDF_EXPECTS(chars_column->null_count() == 0, "Chars column should not contain nulls");
std::vector<std::unique_ptr<column>> children;
children.emplace_back(std::move(offsets_column));
children.emplace_back(std::move(chars_column));
return std::make_unique<column>(data_type{type_id::STRING},
num_strings,
rmm::device_buffer{0, stream, mr},
null_mask,
null_count,
std::move(children));
}
} // namespace cudf
// clang-format on TODO fix
|
ProduitScalaireKernel.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cudaTools.h"
#include "Indice2D.h"
#include "Indice1D.h"
#include "Device.h"
#include "Lock.h"
#include "FonctionsProduitScalaire.h"
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstdio>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void kernelProduitScalaire(int n, Lock lock, float* ptrDevResult);
__device__ void reductionIntraBlock(float* tabSM, int n);
__device__ void reductionInterBlock(float* tabSM, Lock &lock, float* ptrDevResult);
__device__ void ecrasement(float* tabSM, int moitie);
__device__ void fillBlock(float* tabSM, int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
void launchKernelProduitScalaire(int n, float &resultat)
{
int sizeTabPerBlock = n/16;
dim3 dg = dim3(16, 1, 1);
dim3 db = dim3(sizeTabPerBlock, 1, 1);
Device::assertDim(dg, db);
//Taille de tabSM en Shared Memory
size_t size = sizeof(float) * sizeTabPerBlock;
Lock lock;
float *ptrDevResult;
// Device memory allocation in GM
HANDLE_ERROR(hipMalloc((void**) &ptrDevResult, sizeof(float)));
HANDLE_ERROR(hipMemset(ptrDevResult, 0, sizeof(float)));
hipLaunchKernelGGL(( kernelProduitScalaire), dim3(dg),dim3(db),size, 0, n/16, lock, ptrDevResult);
// Device -> Host
HANDLE_ERROR(hipMemcpy(&resultat, ptrDevResult, sizeof(float), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(ptrDevResult));
}
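// Two-stage dot product: each block fills its shared-memory tile with v(i)*w(i),
// reduces it by successive halving (ecrasement), then thread 0 of every block adds
// its partial sum to the single global result under a lock.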
__global__ void kernelProduitScalaire(int n, Lock lock, float *ptrDevResult)
{
extern __shared__ float tabSM[];
fillBlock(tabSM, n);
__syncthreads();
reductionIntraBlock(tabSM, n);
reductionInterBlock(tabSM, lock, ptrDevResult);
__syncthreads();
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void fillBlock(float* tabSM, int n)
{
int tidLocal = threadIdx.x;
int i = Indice1D::tid();
if (tidLocal < n)
{
tabSM[tidLocal] = v(i) * w(i);
}
}
__device__ void reductionIntraBlock(float* tabSM, int n)
{
int moitie = n / 2;
while(moitie >= 1)
{
ecrasement(tabSM, moitie);
moitie /= 2;
__syncthreads();
}
}
__device__ void reductionInterBlock(float* tabSM, Lock &lock, float* ptrDevResult)
{
int tidLocal = threadIdx.x;
if(tidLocal == 0)
{
lock.lock();
*ptrDevResult += tabSM[0];
lock.unlock();
}
}
__device__ void ecrasement(float* tabSM, int moitie)
{
int tidLocal = threadIdx.x;
int i = tidLocal;
if(i < moitie)
{
tabSM[i] = tabSM[i] + tabSM[i + moitie];
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
ProduitScalaireKernel.cu
|
#include "cudaTools.h"
#include "Indice2D.h"
#include "Indice1D.h"
#include "Device.h"
#include "Lock.h"
#include "FonctionsProduitScalaire.h"
#include "cuda.h"
#include <cmath>
#include <cstdio>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void kernelProduitScalaire(int n, Lock lock, float* ptrDevResult);
__device__ void reductionIntraBlock(float* tabSM, int n);
__device__ void reductionInterBlock(float* tabSM, Lock &lock, float* ptrDevResult);
__device__ void ecrasement(float* tabSM, int moitie);
__device__ void fillBlock(float* tabSM, int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
void launchKernelProduitScalaire(int n, float &resultat)
{
int sizeTabPerBlock = n/16;
dim3 dg = dim3(16, 1, 1);
dim3 db = dim3(sizeTabPerBlock, 1, 1);
Device::assertDim(dg, db);
//Taille de tabSM en Shared Memory
size_t size = sizeof(float) * sizeTabPerBlock;
Lock lock;
float *ptrDevResult;
// Device memory allocation in GM
HANDLE_ERROR(cudaMalloc((void**) &ptrDevResult, sizeof(float)));
HANDLE_ERROR(cudaMemset(ptrDevResult, 0, sizeof(float)));
kernelProduitScalaire<<<dg,db,size>>>(n/16, lock, ptrDevResult);
// Device -> Host
HANDLE_ERROR(cudaMemcpy(&resultat, ptrDevResult, sizeof(float), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaFree(ptrDevResult));
}
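// Two-stage dot product: each block fills its shared-memory tile with v(i)*w(i),
// reduces it by successive halving (ecrasement), then thread 0 of every block adds
// its partial sum to the single global result under a lock.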
__global__ void kernelProduitScalaire(int n, Lock lock, float *ptrDevResult)
{
extern __shared__ float tabSM[];
fillBlock(tabSM, n);
__syncthreads();
reductionIntraBlock(tabSM, n);
reductionInterBlock(tabSM, lock, ptrDevResult);
__syncthreads();
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void fillBlock(float* tabSM, int n)
{
int tidLocal = threadIdx.x;
int i = Indice1D::tid();
if (tidLocal < n)
{
tabSM[tidLocal] = v(i) * w(i);
}
}
__device__ void reductionIntraBlock(float* tabSM, int n)
{
int moitie = n / 2;
while(moitie >= 1)
{
ecrasement(tabSM, moitie);
moitie /= 2;
__syncthreads();
}
}
__device__ void reductionInterBlock(float* tabSM, Lock &lock, float* ptrDevResult)
{
int tidLocal = threadIdx.x;
if(tidLocal == 0)
{
lock.lock();
*ptrDevResult += tabSM[0];
lock.unlock();
}
}
__device__ void ecrasement(float* tabSM, int moitie)
{
int tidLocal = threadIdx.x;
int i = tidLocal;
if(i < moitie)
{
tabSM[i] = tabSM[i] + tabSM[i + moitie];
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
2133e35e4d181b1f015acf58886b4d05b5362ebb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transpose_softmax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *odata = NULL;
hipMalloc(&odata, XSIZE*YSIZE);
float *idata = NULL;
hipMalloc(&idata, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
transpose_softmax), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
transpose_softmax), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
transpose_softmax), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
2133e35e4d181b1f015acf58886b4d05b5362ebb.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transpose_softmax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *odata = NULL;
cudaMalloc(&odata, XSIZE*YSIZE);
float *idata = NULL;
cudaMalloc(&idata, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
transpose_softmax<<<gridBlock,threadBlock>>>(odata,idata,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
transpose_softmax<<<gridBlock,threadBlock>>>(odata,idata,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
transpose_softmax<<<gridBlock,threadBlock>>>(odata,idata,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
9473f9465521279f49a9348709cbc099f4fcd7e3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "TW_cuTempUtils.h"
#include "TW.h"
#include <hip/hip_runtime.h>
namespace TW{
/// \brief CUDA kernel to initialize an array
///
/// \param devPtr device pointer holding the array
/// \param val the value used to initialize the array elements
/// \param nwords number of elements to be initialized
template<typename T>
__global__ void initKernel(T * devPtr, const T val, const size_t nwords)
{
int tidx = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for(; tidx < nwords; tidx += stride)
devPtr[tidx] = val;
}
template<typename T>
void cuInitialize(T* devPtr, const T val, const size_t nwords)
{
hipLaunchKernelGGL(( initKernel<T>), dim3(256), dim3(64), 0, 0, devPtr, val, nwords);
}
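// Minimal usage sketch (illustrative only; the buffer name and element count are
// assumptions, not part of this file):
//   float *d_buf;
//   hipMalloc(&d_buf, 1024 * sizeof(float));
//   TW::cuInitialize<float>(d_buf, 0.0f, 1024);  // fills all 1024 elements with 0.0f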
template TW_LIB_DLL_EXPORTS void cuInitialize<float>(float *devPtr, const float val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<double>(double *devPtr, const double val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<int>(int *devPtr, const int val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<unsigned int>(unsigned int *devPtr, const unsigned int val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<uchar1>(uchar1 *devPtr, const uchar1 val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<uchar2>(uchar2 *devPtr, const uchar2 val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<uchar3>(uchar3 *devPtr, const uchar3 val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<uchar4>(uchar4 *devPtr, const uchar4 val, const size_t nwords);
}
|
9473f9465521279f49a9348709cbc099f4fcd7e3.cu
|
#include "TW_cuTempUtils.h"
#include "TW.h"
#include <cuda_runtime.h>
namespace TW{
/// \brief CUDA kernel to initialize an array
///
/// \param devPtr device pointer holding the array
/// \param val the value used to initialize the array elements
	/// \param nwords number of elements (of type T) to be initialized
template<typename T>
__global__ void initKernel(T * devPtr, const T val, const size_t nwords)
{
int tidx = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
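	// Grid-stride loop: each thread advances by the total number of launched threads,
	// so the fixed 256x64 launch in cuInitialize() below covers a buffer of any length.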
for(; tidx < nwords; tidx += stride)
devPtr[tidx] = val;
}
template<typename T>
void cuInitialize(T* devPtr, const T val, const size_t nwords)
{
initKernel<T><<<256, 64>>>(devPtr, val, nwords);
}
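// Minimal usage sketch (illustrative only; the buffer name and element count are
// assumptions, not part of this file):
//   float *d_buf;
//   cudaMalloc(&d_buf, 1024 * sizeof(float));
//   TW::cuInitialize<float>(d_buf, 0.0f, 1024);  // fills all 1024 elements with 0.0f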
template TW_LIB_DLL_EXPORTS void cuInitialize<float>(float *devPtr, const float val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<double>(double *devPtr, const double val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<int>(int *devPtr, const int val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<unsigned int>(unsigned int *devPtr, const unsigned int val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<uchar1>(uchar1 *devPtr, const uchar1 val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<uchar2>(uchar2 *devPtr, const uchar2 val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<uchar3>(uchar3 *devPtr, const uchar3 val, const size_t nwords);
template TW_LIB_DLL_EXPORTS void cuInitialize<uchar4>(uchar4 *devPtr, const uchar4 val, const size_t nwords);
}
|
1c1d6b8b09c71609fb066ffd017ae4af83c84a8a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include "paddle/phi/kernels/norm_kernel.h"
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/common_shape.h"
namespace phi {
__device__ __forceinline__ dtype::float16 square_root(dtype::float16 x) {
return static_cast<dtype::float16>(sqrtf(static_cast<float>(x)));
}
__device__ __forceinline__ float square_root(float x) { return sqrtf(x); }
__device__ __forceinline__ double square_root(double x) { return sqrt(x); }
template <typename T, int BlockDim>
__global__ void Normalize(const T* x,
const int pre,
const int axis_n, // dim in axis
const int post,
const T eps,
T* y,
T* out_norm) {
using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
typedef hipcub::BlockReduce<MT, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int num = pre * post;
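  // Each iteration of this grid-stride loop handles one (pre, post) slice: block-reduce
  // the sum of squares along the axis, take sqrt(sum + eps) as the L2 norm, then scale
  // the slice by 1/norm.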
for (int i = blockIdx.x; i < num; i += gridDim.x) {
int base = (i / post) * post * axis_n + (i % post);
MT sum = 0.0;
__shared__ MT norm;
for (int j = threadIdx.x; j < axis_n; j += blockDim.x) {
const MT x_ij = static_cast<MT>(x[base + j * post]);
sum += x_ij * x_ij;
}
MT reduce_result = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
norm = square_root(reduce_result + static_cast<MT>(eps));
out_norm[i] = static_cast<T>(norm);
}
__syncthreads();
for (int j = threadIdx.x; j < axis_n; j += blockDim.x) {
const int index = base + j * post;
y[index] = static_cast<T>((static_cast<MT>(x[index]) / norm));
}
}
}
template <typename T, typename Context>
void NormKernel(const Context& ctx,
const DenseTensor& x,
int axis,
float epsilon,
bool is_test,
DenseTensor* out,
DenseTensor* norm) {
auto* in_x = &x;
auto* out_y = out;
auto xdim = in_x->dims();
if (axis < 0) axis = xdim.size() + axis;
T eps = static_cast<T>(epsilon);
DenseTensor* out_norm;
DenseTensor out_norm_tmp;
if (is_test) {
auto out_dim = in_x->dims();
out_dim[axis] = 1;
out_norm = &out_norm_tmp;
out_norm->Resize(out_dim);
} else {
out_norm = norm;
}
const T* x_ptr = in_x->data<T>();
ctx.template Alloc<T>(out_y);
ctx.template Alloc<T>(out_norm);
T* y = out_y->data<T>();
T* norm_ptr = out_norm->data<T>();
int pre, n, post;
funcs::GetPrePostNumel(xdim, axis, &pre, &n, &post);
#ifdef __HIPCC__
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = ctx.GetMaxPhysicalThreadCount();
const int max_blocks = ::max(max_threads / block, 1);
int grid = ::min(max_blocks, pre * post);
hipLaunchKernelGGL(( Normalize<T, block>)
, dim3(grid), dim3(block), 0, ctx.stream(), x_ptr, pre, n, post, eps, y, norm_ptr);
}
} // namespace phi
PD_REGISTER_KERNEL(norm,
GPU,
ALL_LAYOUT,
phi::NormKernel,
float,
double,
phi::dtype::float16) {}
|
1c1d6b8b09c71609fb066ffd017ae4af83c84a8a.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include "paddle/phi/kernels/norm_kernel.h"
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/common_shape.h"
namespace phi {
__device__ __forceinline__ dtype::float16 square_root(dtype::float16 x) {
return static_cast<dtype::float16>(sqrtf(static_cast<float>(x)));
}
__device__ __forceinline__ float square_root(float x) { return sqrtf(x); }
__device__ __forceinline__ double square_root(double x) { return sqrt(x); }
template <typename T, int BlockDim>
__global__ void Normalize(const T* x,
const int pre,
const int axis_n, // dim in axis
const int post,
const T eps,
T* y,
T* out_norm) {
using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
typedef cub::BlockReduce<MT, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int num = pre * post;
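  // Each iteration of this grid-stride loop handles one (pre, post) slice: block-reduce
  // the sum of squares along the axis, take sqrt(sum + eps) as the L2 norm, then scale
  // the slice by 1/norm.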
for (int i = blockIdx.x; i < num; i += gridDim.x) {
int base = (i / post) * post * axis_n + (i % post);
MT sum = 0.0;
__shared__ MT norm;
for (int j = threadIdx.x; j < axis_n; j += blockDim.x) {
const MT x_ij = static_cast<MT>(x[base + j * post]);
sum += x_ij * x_ij;
}
MT reduce_result = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
norm = square_root(reduce_result + static_cast<MT>(eps));
out_norm[i] = static_cast<T>(norm);
}
__syncthreads();
for (int j = threadIdx.x; j < axis_n; j += blockDim.x) {
const int index = base + j * post;
y[index] = static_cast<T>((static_cast<MT>(x[index]) / norm));
}
}
}
template <typename T, typename Context>
void NormKernel(const Context& ctx,
const DenseTensor& x,
int axis,
float epsilon,
bool is_test,
DenseTensor* out,
DenseTensor* norm) {
auto* in_x = &x;
auto* out_y = out;
auto xdim = in_x->dims();
if (axis < 0) axis = xdim.size() + axis;
T eps = static_cast<T>(epsilon);
DenseTensor* out_norm;
DenseTensor out_norm_tmp;
if (is_test) {
auto out_dim = in_x->dims();
out_dim[axis] = 1;
out_norm = &out_norm_tmp;
out_norm->Resize(out_dim);
} else {
out_norm = norm;
}
const T* x_ptr = in_x->data<T>();
ctx.template Alloc<T>(out_y);
ctx.template Alloc<T>(out_norm);
T* y = out_y->data<T>();
T* norm_ptr = out_norm->data<T>();
int pre, n, post;
funcs::GetPrePostNumel(xdim, axis, &pre, &n, &post);
#ifdef __HIPCC__
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = ctx.GetMaxPhysicalThreadCount();
const int max_blocks = std::max(max_threads / block, 1);
int grid = std::min(max_blocks, pre * post);
Normalize<T, block>
<<<grid, block, 0, ctx.stream()>>>(x_ptr, pre, n, post, eps, y, norm_ptr);
}
} // namespace phi
PD_REGISTER_KERNEL(norm,
GPU,
ALL_LAYOUT,
phi::NormKernel,
float,
double,
phi::dtype::float16) {}
|
4e163287066d02b1f8ef6bc394298a0a9cdae81a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#ifdef _FLOAT_
typedef float basetype; // Tipo para elementos: float
#define labelelem "floats"
#elif _DOUBLE_
typedef double basetype; // Tipo para elementos: double
#define labelelem "doubles"
#else
typedef int basetype; // Tipo para elementos: int PREDETERMINADO
#define labelelem "ints"
#endif
#include <sys/time.h>
#include <sys/resource.h>
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
/*
Función para inicializar el vector que vamos a utilizar
*/
void init_CPU_array(basetype array[], const unsigned int n)
{
unsigned int i;
for(i = 0; i < n; i++) {
array[i] = (basetype)i;
}
}
void init2_CPU_array(basetype array[], const unsigned int n)
{
unsigned int i;
for(i = 0; i < n; i++) {
array[i] = ((basetype)i) * 2;
}
}
int sum_CPU_array(basetype *a, basetype *b, basetype *c, int n)
{
for (int i=0; i<n; i++){
c[i]=a[i]+b[i];
}
return 0;
}
int print_CPU_array(basetype *c, int n)
{
for (int i=0; i<n; i++){
printf("%d\n",c[i]);
}
return 0;
}
__global__ void suma_kernel_cuda(basetype * a, basetype * b, basetype *c, const int n, const int cant, int threads){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
// no tocar, no hace nada pero no tocar si aprecias tu vida y tu sanidad
//printf("soy %d\n", global_id);
if (global_id < n){
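    // Each thread adds `cant` elements spaced `threads` apart; with threads = n/cant
    // threads launched, the whole grid covers all n elements.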
for(int i=0; i<cant ; i++){
c[global_id+(threads*i)] = a[global_id+(threads*i)]+b[global_id+(threads*i)];
}
}
}
int sum_GPU_array(basetype *a,basetype *b,basetype *c,int n,int cb)
{
int cant_positions=16;
double timetick;
int blk_size=cb;
hipError_t error;
// Número de bytes de cada uno de nuestros vectores
unsigned int numBytes = n * sizeof(basetype);
// Reservamos memoria global del device (GPU) para el array y lo copiamos
basetype *ga, *gb, *gc;
timetick = dwalltime();
hipMalloc((void **) &ga, numBytes);
hipMalloc((void **) &gb, numBytes);
hipMalloc((void **) &gc, numBytes);
printf("-> Tiempo de alocacion en memoria global de GPU %f\n", dwalltime() - timetick);
timetick = dwalltime();
hipMemcpy(ga, a, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(gb, b, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
printf("-> Tiempo de copia de memoria CPU =>> GPU %f\n", dwalltime() - timetick);
// Bloque unidimensional de hilos (*blk_size* hilos)
dim3 dimBlock(blk_size);
// Grid unidimensional (*ceil(n/blk_size)* bloques)
dim3 dimGrid((((n + dimBlock.x - 1) / dimBlock.x))/cant_positions);
printf("%d---%d---%d\n",blk_size, (((n + dimBlock.x - 1) / dimBlock.x))/cant_positions, blk_size* (((n + dimBlock.x - 1) / dimBlock.x))/cant_positions);
int threads=n/cant_positions;
printf("%d threads\n", threads);
//printf("%d %d",dimBlock.x,dimGrid.x);
// Lanzamos ejecución del kernel en la GPU
//timestamp(start); // Medimos tiempo de cálculo en GPU
timetick = dwalltime();
hipLaunchKernelGGL(( suma_kernel_cuda), dim3(dimGrid), dim3(dimBlock), 0, 0, ga,gb,gc,n,cant_positions,threads);
error=hipDeviceSynchronize();
printf("%s\n", hipGetErrorString(error));
printf("-> Tiempo de ejecucion en GPU %f\n", dwalltime() - timetick);
//timestamp(end);
// Movemos resultado: GPU -> CPU
timetick = dwalltime();
hipMemcpy(c, gc, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
printf("-> Tiempo de copia GPU ==>> CPU %f\n", dwalltime() - timetick);
// Liberamos memoria global del device utilizada
hipFree (ga);
hipFree (gb);
hipFree (gc);
int aux=0;
for (int i=0;i<n;i++){
//printf("%d+%d=%d---%d\n", a[i],b[i],c[i], ((a[i]+b[i])==c[i]));
//printf("%d\n", 1==1);
//if ((a[i]+b[i])!=c[i]) printf("%g\n", c[i]);
if ((a[i]+b[i])!=c[i]){
//printf("%d+%d=%d---%d +++ %d\n", a[i],b[i],c[i], ((a[i]+b[i])==c[i]),i);
aux++;
}
}
printf("%d fallidos\n",aux);
return 0;
}
int main()
{
int n=4096*4096;
int size= sizeof(basetype);
basetype *a, *b, *c;
double timetick;
int cb=64;
// Reservamos e inicializamos el vector en CPU
timetick = dwalltime();
a = (basetype *) malloc(n*size);
b = (basetype *) malloc(n*size);
c = (basetype *) malloc(n*size);
init_CPU_array(a,n);
init2_CPU_array(b,n);
printf("-> Tiempo de alocar memoria e inicializar vectores en CPU %f\n", dwalltime() - timetick);
timetick = dwalltime();
sum_CPU_array(a,b,c,n);
//print_CPU_array(c,n);
printf("-> Tiempo de alocar memoria e inicializar vectores en CPU %f\n", dwalltime() - timetick);
//init_CPU_array(c,n);
//print_CPU_array(c,n);
sum_GPU_array(a,b,c,n,cb);
free(a);
free(b);
free(c);
return 0;
}
|
4e163287066d02b1f8ef6bc394298a0a9cdae81a.cu
|
#include <stdio.h>
#include <stdlib.h>
#ifdef _FLOAT_
typedef float basetype; // Tipo para elementos: float
#define labelelem "floats"
#elif _DOUBLE_
typedef double basetype; // Tipo para elementos: double
#define labelelem "doubles"
#else
typedef int basetype; // Tipo para elementos: int PREDETERMINADO
#define labelelem "ints"
#endif
#include <sys/time.h>
#include <sys/resource.h>
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
/*
Función para inicializar el vector que vamos a utilizar
*/
void init_CPU_array(basetype array[], const unsigned int n)
{
unsigned int i;
for(i = 0; i < n; i++) {
array[i] = (basetype)i;
}
}
void init2_CPU_array(basetype array[], const unsigned int n)
{
unsigned int i;
for(i = 0; i < n; i++) {
array[i] = ((basetype)i) * 2;
}
}
int sum_CPU_array(basetype *a, basetype *b, basetype *c, int n)
{
for (int i=0; i<n; i++){
c[i]=a[i]+b[i];
}
return 0;
}
int print_CPU_array(basetype *c, int n)
{
for (int i=0; i<n; i++){
printf("%d\n",c[i]);
}
return 0;
}
__global__ void suma_kernel_cuda(basetype * a, basetype * b, basetype *c, const int n, const int cant, int threads){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
// no tocar, no hace nada pero no tocar si aprecias tu vida y tu sanidad
//printf("soy %d\n", global_id);
if (global_id < n){
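    // Each thread adds `cant` elements spaced `threads` apart; with threads = n/cant
    // threads launched, the whole grid covers all n elements.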
for(int i=0; i<cant ; i++){
c[global_id+(threads*i)] = a[global_id+(threads*i)]+b[global_id+(threads*i)];
}
}
}
int sum_GPU_array(basetype *a,basetype *b,basetype *c,int n,int cb)
{
int cant_positions=16;
double timetick;
int blk_size=cb;
cudaError_t error;
// Número de bytes de cada uno de nuestros vectores
unsigned int numBytes = n * sizeof(basetype);
// Reservamos memoria global del device (GPU) para el array y lo copiamos
basetype *ga, *gb, *gc;
timetick = dwalltime();
cudaMalloc((void **) &ga, numBytes);
cudaMalloc((void **) &gb, numBytes);
cudaMalloc((void **) &gc, numBytes);
printf("-> Tiempo de alocacion en memoria global de GPU %f\n", dwalltime() - timetick);
timetick = dwalltime();
cudaMemcpy(ga, a, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(gb, b, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
printf("-> Tiempo de copia de memoria CPU =>> GPU %f\n", dwalltime() - timetick);
// Bloque unidimensional de hilos (*blk_size* hilos)
dim3 dimBlock(blk_size);
// Grid unidimensional (*ceil(n/blk_size)* bloques)
dim3 dimGrid((((n + dimBlock.x - 1) / dimBlock.x))/cant_positions);
printf("%d---%d---%d\n",blk_size, (((n + dimBlock.x - 1) / dimBlock.x))/cant_positions, blk_size* (((n + dimBlock.x - 1) / dimBlock.x))/cant_positions);
int threads=n/cant_positions;
printf("%d threads\n", threads);
//printf("%d %d",dimBlock.x,dimGrid.x);
// Lanzamos ejecución del kernel en la GPU
//timestamp(start); // Medimos tiempo de cálculo en GPU
timetick = dwalltime();
suma_kernel_cuda<<<dimGrid, dimBlock>>>(ga,gb,gc,n,cant_positions,threads);
error=cudaDeviceSynchronize();
printf("%s\n", cudaGetErrorString(error));
printf("-> Tiempo de ejecucion en GPU %f\n", dwalltime() - timetick);
//timestamp(end);
// Movemos resultado: GPU -> CPU
timetick = dwalltime();
cudaMemcpy(c, gc, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
printf("-> Tiempo de copia GPU ==>> CPU %f\n", dwalltime() - timetick);
// Liberamos memoria global del device utilizada
cudaFree (ga);
cudaFree (gb);
cudaFree (gc);
int aux=0;
for (int i=0;i<n;i++){
//printf("%d+%d=%d---%d\n", a[i],b[i],c[i], ((a[i]+b[i])==c[i]));
//printf("%d\n", 1==1);
//if ((a[i]+b[i])!=c[i]) printf("%g\n", c[i]);
if ((a[i]+b[i])!=c[i]){
//printf("%d+%d=%d---%d +++ %d\n", a[i],b[i],c[i], ((a[i]+b[i])==c[i]),i);
aux++;
}
}
printf("%d fallidos\n",aux);
return 0;
}
int main()
{
int n=4096*4096;
int size= sizeof(basetype);
basetype *a, *b, *c;
double timetick;
int cb=64;
// Reservamos e inicializamos el vector en CPU
timetick = dwalltime();
a = (basetype *) malloc(n*size);
b = (basetype *) malloc(n*size);
c = (basetype *) malloc(n*size);
init_CPU_array(a,n);
init2_CPU_array(b,n);
printf("-> Tiempo de alocar memoria e inicializar vectores en CPU %f\n", dwalltime() - timetick);
timetick = dwalltime();
sum_CPU_array(a,b,c,n);
//print_CPU_array(c,n);
printf("-> Tiempo de alocar memoria e inicializar vectores en CPU %f\n", dwalltime() - timetick);
//init_CPU_array(c,n);
//print_CPU_array(c,n);
sum_GPU_array(a,b,c,n,cb);
free(a);
free(b);
free(c);
return 0;
}
|
c447383ff5a685fe846ff1fc676f025dd0efca23.hip
|
// !!! This is a file automatically generated by hipify!!!
// test calling kernels from different threads, in parallel (can be different kernels, or same. either way, should work, not crash :-) )
#include "pthread.h"
#include "hostside_opencl_funcs.h"
#include <iostream>
#include <memory>
#include <cassert>
#include <sstream>
using namespace std;
#include <hip/hip_runtime.h>
const int N = 1024;
__global__ void getValue(float *outdata, float *indata) {
outdata[0] = indata == 0 ? 3.0f : 2.0f;
}
template<typename T>
static std::string toString(T val) {
std::ostringstream myostringstream;
myostringstream << val;
return myostringstream.str();
}
pthread_mutex_t print_mutex = PTHREAD_MUTEX_INITIALIZER;
void print(string message) {
pthread_mutex_lock(&print_mutex);
cout << message << endl;
pthread_mutex_unlock(&print_mutex);
}
void *thread_func(void *data) {
int i = (size_t)data;
print("thread " + toString(i));
hipStream_t stream;
hipStreamCreate__(&stream, 0);
float *hostFloats1;
hipHostMalloc((void **)&hostFloats1, N * sizeof(float), HIP_MEMHOSTALLOC_PORTABLE);
hipDeviceptr_t deviceFloats1;
cuMemAlloc(&deviceFloats1, N * sizeof(float));
hipLaunchKernelGGL(( getValue), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, ((float *)deviceFloats1), 0);
hipLaunchKernelGGL(( getValue), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, ((float *)deviceFloats1), 0);
hipLaunchKernelGGL(( getValue), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, ((float *)deviceFloats1), 0);
hipLaunchKernelGGL(( getValue), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, ((float *)deviceFloats1), 0);
hipStreamSynchronize(stream);
print("num kernels cached " + toString(cocl::getNumCachedKernels()));
print("num kernels calls " + toString(cocl::getNumKernelCalls()));
assert(cocl::getNumCachedKernels() == 1);
assert(cocl::getNumKernelCalls() == 4);
hipHostFree(hostFloats1);
hipFree(deviceFloats1);
hipStreamDestroy(stream);
return 0;
}
void testfloatstar() {
const int NUM_THREADS = 4;
pthread_t threads[ NUM_THREADS ];
for(long long i = 0; i < NUM_THREADS; i++) {
pthread_create(&threads[i], NULL, thread_func, (void *)i);
}
cout << "creaed threads" << endl;
for(int i = 0; i < NUM_THREADS; i++) {
pthread_join(threads[i], NULL);
cout << "joined thread " << i << endl;
}
}
int main(int argc, char *argv[]) {
testfloatstar();
return 0;
}
|
c447383ff5a685fe846ff1fc676f025dd0efca23.cu
|
// test calling kernels from different threads, in parallel (can be different kernels, or same. either way, should work, not crash :-) )
#include "pthread.h"
#include "hostside_opencl_funcs.h"
#include <iostream>
#include <memory>
#include <cassert>
#include <sstream>
using namespace std;
#include <cuda.h>
const int N = 1024;
__global__ void getValue(float *outdata, float *indata) {
outdata[0] = indata == 0 ? 3.0f : 2.0f;
}
template<typename T>
static std::string toString(T val) {
std::ostringstream myostringstream;
myostringstream << val;
return myostringstream.str();
}
pthread_mutex_t print_mutex = PTHREAD_MUTEX_INITIALIZER;
void print(string message) {
pthread_mutex_lock(&print_mutex);
cout << message << endl;
pthread_mutex_unlock(&print_mutex);
}
void *thread_func(void *data) {
int i = (size_t)data;
print("thread " + toString(i));
CUstream stream;
cuStreamCreate(&stream, 0);
float *hostFloats1;
cuMemHostAlloc((void **)&hostFloats1, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
CUdeviceptr deviceFloats1;
cuMemAlloc(&deviceFloats1, N * sizeof(float));
getValue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(((float *)deviceFloats1), 0);
getValue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(((float *)deviceFloats1), 0);
getValue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(((float *)deviceFloats1), 0);
getValue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(((float *)deviceFloats1), 0);
cuStreamSynchronize(stream);
print("num kernels cached " + toString(cocl::getNumCachedKernels()));
print("num kernels calls " + toString(cocl::getNumKernelCalls()));
assert(cocl::getNumCachedKernels() == 1);
assert(cocl::getNumKernelCalls() == 4);
cuMemFreeHost(hostFloats1);
cuMemFree(deviceFloats1);
cuStreamDestroy(stream);
return 0;
}
void testfloatstar() {
const int NUM_THREADS = 4;
pthread_t threads[ NUM_THREADS ];
for(long long i = 0; i < NUM_THREADS; i++) {
pthread_create(&threads[i], NULL, thread_func, (void *)i);
}
cout << "creaed threads" << endl;
for(int i = 0; i < NUM_THREADS; i++) {
pthread_join(threads[i], NULL);
cout << "joined thread " << i << endl;
}
}
int main(int argc, char *argv[]) {
testfloatstar();
return 0;
}
|
dda77c4fbcf079200ad90bb144160b018af080e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* steps.cpp
*
* Description : GPU steps
*
* Created on : 01.Mar.2012
* Author : Orhan Firat
* Department of Computer Engineering
* Middle East Technical University
* E-mail : [email protected]
*
* Copyright, 2012, Orhan Firat
*
* Vode An
*/
// Copyright (c) 2012 Orhan Firat
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "steps.h"
// 2D-Texture memory bindings for time-series
texture<TYPE, 2, hipReadModeElementType> texSrc;
texture<TYPE, 2, hipReadModeElementType> texDst;
// square<T> computes the square of a number f(x) -> x*x
template <typename T>
struct square
{
__host__ __device__
T operator()(const T& x) const {
return x * x;
}
};
/**
 * CUDA kernel computing one tile of Pearson correlation coefficients between time series
*
* @param out Output array of calculated correlation coefficients
* @param sumX Sum of individual time series
* @param sumXX Sum of squares of individual time series
* @param tileSize Tile size for matrix chunk
* @param tileIdx Horizontal index of current tile
* @param tileIdy Vertical index of current tile
* @param numTimesteps Number of time series of a sample
* @param numChunks Total number of matrix chunks
*/
__global__ void kernel_pearson_corr(TYPE* out, TYPE* sumX, TYPE* sumXX,
int tileSize, int tileIdx, int tileIdy,
int numTimesteps, int numChunks){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int linear_idx = idy*tileSize+ idx;
int sumIdx = tileIdx*tileSize + idx;
int sumIdy = tileIdy*tileSize + idy;
if(idx<tileSize && idy<tileSize){
TYPE accum = 0;
for(int i=0; i<numTimesteps ; ++i){
TYPE x = tex2D(texSrc,TYPE(i),TYPE(idx));
TYPE y = tex2D(texDst,TYPE(i),TYPE(idy));
accum += x*y;
}
__syncthreads();
TYPE xbar = sumX[sumIdx]/numTimesteps;
TYPE ybar = sumX[sumIdy]/numTimesteps;
__syncthreads();
TYPE xx = sumXX[sumIdx];
TYPE yy = sumXX[sumIdy];
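        // Pearson correlation for the (idx, idy) pair:
        // r = (sum(xy) - n*xbar*ybar) / sqrt((sum(x^2) - n*xbar^2) * (sum(y^2) - n*ybar^2))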
out[linear_idx] = (accum - (numTimesteps*xbar*ybar))/
(sqrtf( (xx - numTimesteps*xbar*xbar )*(yy - numTimesteps*ybar*ybar)));
}
}
/**
 * Function to calculate sum of individual time series by a matrix-vector multiplication using cublas.
*
* @param meta Metadata structure for hyper parameters and stuff
*/
void step1_calculate_sumX(METADATA* meta){
int MATRIX_CHUNKSIZE = meta->chunkSize;
int NUM_SAMPLES = meta->numSamples;
int NUM_TIMESTEPS = meta->numTimesteps;
// calculate number of passes
int numPasses = (NUM_SAMPLES / MATRIX_CHUNKSIZE)+(NUM_SAMPLES % MATRIX_CHUNKSIZE == 0 ? 0 : 1);
// allocate result vector on host
meta->h_sumX = (TYPE*)malloc(NUM_SAMPLES*sizeof(TYPE));
// allocate array and set to one for multiplication
TYPE* ones_tmp = (TYPE*)malloc(NUM_SAMPLES*sizeof(TYPE));
for(int i=0 ; i<NUM_SAMPLES ; ++i)
ones_tmp[i]=1.0f;
TYPE *d_ones_arr, *d_row_sum, *d_matrix_chunk, *h_matrix;
h_matrix = meta->data;
// allocate chunk matrix on device
hipMalloc((void**)&d_matrix_chunk, MATRIX_CHUNKSIZE*NUM_TIMESTEPS*sizeof(TYPE));
// CUBLAS block
{
hipblasInit();
hipblasAlloc( MATRIX_CHUNKSIZE, sizeof(TYPE), (void**) &d_ones_arr); //vector A
hipblasAlloc( MATRIX_CHUNKSIZE, sizeof(TYPE), (void**) &d_row_sum); //vector B
// transfer host data to device for helper vector
hipblasSetVector( MATRIX_CHUNKSIZE, sizeof(TYPE), ones_tmp, 1, d_ones_arr, 1);
start_timer();
// process each chunk
for(int i=0 ; i<numPasses ; ++i){
// get chunk indices
int startInd = i*MATRIX_CHUNKSIZE;
int endInd = startInd + ( (i+1==numPasses) ? (NUM_SAMPLES % MATRIX_CHUNKSIZE) : MATRIX_CHUNKSIZE ) - 1;
int numels = endInd - startInd + 1;
// transfer host matrix chunk to device
hipMemcpy(d_matrix_chunk, &h_matrix[startInd*NUM_TIMESTEPS], numels*sizeof(TYPE)*NUM_TIMESTEPS, hipMemcpyHostToDevice);
hipMemset(d_row_sum, 0, NUM_TIMESTEPS * sizeof(TYPE));
// Perform matrix vector multiplication with cublas to obtain col sums
hipblasSgemv('T', NUM_TIMESTEPS, numels, 1, d_matrix_chunk, NUM_TIMESTEPS, d_ones_arr, 1, 0, d_row_sum, 1);
// transfer device solution vector chunk to host
hipMemcpy(&meta->h_sumX[startInd], d_row_sum, numels*sizeof(TYPE), hipMemcpyDeviceToHost);
}
stop_timer("STEP1::CUBLASSGEMV",1);
hipblasFree(d_ones_arr);
hipblasFree(d_row_sum);
hipblasShutdown();
}
hipFree(d_matrix_chunk);
free(ones_tmp);
}
/**
 * Function to calculate sum of squares of individual time series by a matrix-vector
 * multiplication using cublas. The square operation is conducted using the thrust library.
 * Written as a separate function with almost the same code except for the thrust routines,
 * so that step1 and step2 can be fully parallelised across multiple GPUs
* or the ones that can launch concurrent kernels.
*
* @param meta Metadata structure for hyper parameters and stuff
*/
void step2_calculate_sumXX(METADATA* meta){
int MATRIX_CHUNKSIZE = meta->chunkSize;
int NUM_SAMPLES = meta->numSamples;
int NUM_TIMESTEPS = meta->numTimesteps;
// calculate number of passes
int numPasses = (NUM_SAMPLES / MATRIX_CHUNKSIZE)+(NUM_SAMPLES % MATRIX_CHUNKSIZE == 0 ? 0 : 1);
// allocate result vector on host
meta->h_sumXX = (TYPE*)malloc( NUM_SAMPLES*sizeof(TYPE) );
// allocate array and set to one for multiplication
TYPE* ones_tmp = (TYPE*)malloc( NUM_SAMPLES*sizeof(TYPE) );
for(int i=0 ; i<NUM_SAMPLES ; ++i)
ones_tmp[i]=1.0f;
TYPE *d_ones_arr, *d_row_sum, *d_matrix_chunk, *h_matrix;
h_matrix = meta->data;
// allocate chunk matrix on device
hipMalloc((void**)&d_matrix_chunk, MATRIX_CHUNKSIZE*NUM_TIMESTEPS*sizeof(TYPE));
// CUBLAS block
{
hipblasInit();
hipblasAlloc( MATRIX_CHUNKSIZE, sizeof(TYPE), (void**) &d_ones_arr); //vector A
hipblasAlloc( MATRIX_CHUNKSIZE, sizeof(TYPE), (void**) &d_row_sum); //vector B
// transfer host data to device for helper vector
hipblasSetVector( MATRIX_CHUNKSIZE, sizeof(TYPE), ones_tmp, 1, d_ones_arr, 1);
start_timer();
// process each chunk
for(int i=0 ; i<numPasses ; ++i){
// get chunk indices
int startInd = i*MATRIX_CHUNKSIZE;
int endInd = startInd + ( (i+1==numPasses) ? (NUM_SAMPLES % MATRIX_CHUNKSIZE) : MATRIX_CHUNKSIZE ) - 1;
int numels = endInd - startInd + 1;
// transfer host matrix chunk to device
hipMemcpy(d_matrix_chunk, &h_matrix[startInd*NUM_TIMESTEPS], numels*sizeof(TYPE)*NUM_TIMESTEPS, hipMemcpyHostToDevice);
hipMemset(d_row_sum, 0, NUM_TIMESTEPS * sizeof(TYPE));
// square matrix chunk using thrust
{
thrust::device_ptr<TYPE> dev_ptr1(d_matrix_chunk);
square<TYPE> unary_op;
thrust::transform(dev_ptr1, dev_ptr1+(numels*NUM_TIMESTEPS), dev_ptr1, unary_op);
}
// Perform matrix vector multiplication with cublas to obtain col sums
hipblasSgemv('T', NUM_TIMESTEPS, numels, 1, d_matrix_chunk, NUM_TIMESTEPS, d_ones_arr, 1, 0, d_row_sum, 1);
// transfer device solution vector chunk to host
hipMemcpy(&meta->h_sumXX[startInd], d_row_sum, numels*sizeof(TYPE), hipMemcpyDeviceToHost);
}
stop_timer("STEP2::CUBLASSGEMV",2);
hipblasFree(d_ones_arr);
hipblasFree(d_row_sum);
hipblasShutdown();
}
hipFree(d_matrix_chunk);
free(ones_tmp);
}
/**
* Major function to calculate pearson-correlation coefficient using previous
* steps' results on device.
*
* @param meta Metadata structure for hyper parameters and stuff
*/
void step3_calculate_pearson_corr(METADATA* meta){
clock_t start, stop, total=0; // for writing output to disk
PARAMETERS params;
calculate_parameters(¶ms, meta);
int CHUNK_SIZE_IN_BYTES = params.tileSize*params.tileSize*sizeof(TYPE);
// allocate resulting matrix chunk on device and host
TYPE *d_matrix_chunk;
hipMalloc((void**)&d_matrix_chunk, CHUNK_SIZE_IN_BYTES);
TYPE *h_matrix_chunk = (TYPE*)malloc( CHUNK_SIZE_IN_BYTES );
if(h_matrix_chunk==NULL){
printf("I cannot allocate any more please stop!\n");
exit(-1);
}
// transfer sum(Xi) and sum(square(Xi)) arrays to device-memory
// TODO try utilizing constant memory or texture memory
hipMalloc((void**)&meta->d_sumX, sizeof(TYPE)*meta->numSamples);
hipMalloc((void**)&meta->d_sumXX, sizeof(TYPE)*meta->numSamples);
Check_CUDA_Error("hipMalloc");
hipMemcpy(meta->d_sumX, meta->h_sumX, sizeof(TYPE)*meta->numSamples, hipMemcpyHostToDevice);
hipMemcpy(meta->d_sumXX, meta->h_sumXX, sizeof(TYPE)*meta->numSamples, hipMemcpyHostToDevice);
Check_CUDA_Error("hipMemcpyHostToDevice");
// allocate arrays for source and destination timeseries
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<TYPE>();
hipArray *cu_array_src,*cu_array_dst; //TODO:these two can be merged
hipMallocArray( &cu_array_src, &channelDesc, meta->numTimesteps , params.tileSize);
hipMallocArray( &cu_array_dst, &channelDesc, meta->numTimesteps , params.tileSize);
Check_CUDA_Error("hipMallocArray");
// set texture parameters
texSrc.filterMode = hipFilterModePoint; texSrc.normalized = false;
texDst.filterMode = hipFilterModePoint; texDst.normalized = false;
// configure grids and blocks
dim3 grid,block;
block.x = BLOCK_X;
block.y = BLOCK_Y;
grid.x = (int)(ceil((double)params.tileSize / (double)block.x));
grid.y = (int)(ceil((double)params.tileSize / (double)block.y));
// start computing correlation
for(int i=0,ctr=0 ; i<params.numPass ; ++i ){
// transfer source timeseries data and bind to texture
CUDA_SAFE_CALL(hipMemcpy2DToArray( cu_array_src, 0, 0,
&meta->data[i*params.tileSize*meta->numTimesteps],
sizeof(TYPE)*meta->numTimesteps, sizeof(TYPE)*meta->numTimesteps,
( i+1==params.numPass ? (meta->numSamples%params.tileSize) : params.tileSize),
hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipBindTextureToArray( texSrc, cu_array_src, channelDesc));
for(int j=0 ; j<i+1 ; ++j ,++ctr){
start_timer();
// transfer destination timeseries data and bind to texture
CUDA_SAFE_CALL(hipMemcpy2DToArray( cu_array_dst, 0, 0,
&meta->data[j*params.tileSize*meta->numTimesteps],
sizeof(TYPE)*meta->numTimesteps, sizeof(TYPE)*meta->numTimesteps,
((i+1==params.numPass && j+1==params.numPass) ? (meta->numSamples%params.tileSize) : params.tileSize),
hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipBindTextureToArray( texDst, cu_array_dst, channelDesc));
CUDA_SAFE_CALL(hipMemset((void*)d_matrix_chunk, 0x0 , CHUNK_SIZE_IN_BYTES));
memset(h_matrix_chunk, 0x0 , CHUNK_SIZE_IN_BYTES);
/***********************
* start doing the job *
**********************/
hipLaunchKernelGGL(( kernel_pearson_corr), dim3(grid),dim3(block), 0, 0, d_matrix_chunk, meta->d_sumX, meta->d_sumXX, params.tileSize, i, j, meta->numTimesteps, params.numPass);
Check_CUDA_Error("kernel_pearson_corr");
/***********************
* stop doing the job *
**********************/
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy(h_matrix_chunk, d_matrix_chunk, CHUNK_SIZE_IN_BYTES , hipMemcpyDeviceToHost));
// unbind current texture
// TODO : try not unbinding
CUDA_SAFE_CALL(hipUnbindTexture(texDst));
stop_timer("STEP3::CORR",3);
printf("numPass:%d curr:[%d][%d] ctr:%d\n",params.numPass,i,j,ctr);
fflush(stdout);
start = clock();
if(meta->isOutputBinary)
write_matChunk_to_binFile(h_matrix_chunk, params.tileSize, params.tileSize,i,j, meta->outputDir);
else // ascii-txt
write_matChunk_to_file(h_matrix_chunk, params.tileSize, params.tileSize,i,j, meta->outputDir);
stop = clock();
total += (float)(stop-start);
}
// unbind current texture
// TODO : try not unbinding
CUDA_SAFE_CALL(hipUnbindTexture(texSrc));
}
set_host_timer("STEP4::WRITE_DATA",(float)(total),2);
// clean up
CUDA_SAFE_CALL(hipFreeArray(cu_array_src));
CUDA_SAFE_CALL(hipFreeArray(cu_array_dst));
CUDA_SAFE_CALL(hipFree(d_matrix_chunk));
free(h_matrix_chunk);
}
|
dda77c4fbcf079200ad90bb144160b018af080e7.cu
|
/*
* steps.cpp
*
* Description : GPU steps
*
* Created on : 01.Mar.2012
* Author : Orhan Firat
* Department of Computer Engineering
* Middle East Technical University
* E-mail : [email protected]
*
* Copyright, 2012, Orhan Firat
*
* Vode An
*/
// Copyright (c) 2012 Orhan Firat
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "steps.h"
// 2D-Texture memory bindings for time-series
texture<TYPE, 2, cudaReadModeElementType> texSrc;
texture<TYPE, 2, cudaReadModeElementType> texDst;
// square<T> computes the square of a number f(x) -> x*x
template <typename T>
struct square
{
__host__ __device__
T operator()(const T& x) const {
return x * x;
}
};
/**
 * CUDA kernel computing one tile of Pearson correlation coefficients between time series
*
* @param out Output array of calculated correlation coefficients
* @param sumX Sum of individual time series
* @param sumXX Sum of squares of individual time series
* @param tileSize Tile size for matrix chunk
* @param tileIdx Horizontal index of current tile
* @param tileIdy Vertical index of current tile
* @param numTimesteps Number of time series of a sample
* @param numChunks Total number of matrix chunks
*/
__global__ void kernel_pearson_corr(TYPE* out, TYPE* sumX, TYPE* sumXX,
int tileSize, int tileIdx, int tileIdy,
int numTimesteps, int numChunks){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int linear_idx = idy*tileSize+ idx;
int sumIdx = tileIdx*tileSize + idx;
int sumIdy = tileIdy*tileSize + idy;
if(idx<tileSize && idy<tileSize){
TYPE accum = 0;
for(int i=0; i<numTimesteps ; ++i){
TYPE x = tex2D(texSrc,TYPE(i),TYPE(idx));
TYPE y = tex2D(texDst,TYPE(i),TYPE(idy));
accum += x*y;
}
__syncthreads();
TYPE xbar = sumX[sumIdx]/numTimesteps;
TYPE ybar = sumX[sumIdy]/numTimesteps;
__syncthreads();
TYPE xx = sumXX[sumIdx];
TYPE yy = sumXX[sumIdy];
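        // Pearson correlation for the (idx, idy) pair:
        // r = (sum(xy) - n*xbar*ybar) / sqrt((sum(x^2) - n*xbar^2) * (sum(y^2) - n*ybar^2))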
out[linear_idx] = (accum - (numTimesteps*xbar*ybar))/
(sqrtf( (xx - numTimesteps*xbar*xbar )*(yy - numTimesteps*ybar*ybar)));
}
}
/**
 * Function to calculate sum of individual time series by a matrix-vector multiplication using cublas.
*
* @param meta Metadata structure for hyper parameters and stuff
*/
void step1_calculate_sumX(METADATA* meta){
int MATRIX_CHUNKSIZE = meta->chunkSize;
int NUM_SAMPLES = meta->numSamples;
int NUM_TIMESTEPS = meta->numTimesteps;
// calculate number of passes
int numPasses = (NUM_SAMPLES / MATRIX_CHUNKSIZE)+(NUM_SAMPLES % MATRIX_CHUNKSIZE == 0 ? 0 : 1);
// allocate result vector on host
meta->h_sumX = (TYPE*)malloc(NUM_SAMPLES*sizeof(TYPE));
// allocate array and set to one for multiplication
TYPE* ones_tmp = (TYPE*)malloc(NUM_SAMPLES*sizeof(TYPE));
for(int i=0 ; i<NUM_SAMPLES ; ++i)
ones_tmp[i]=1.0f;
TYPE *d_ones_arr, *d_row_sum, *d_matrix_chunk, *h_matrix;
h_matrix = meta->data;
// allocate chunk matrix on device
cudaMalloc((void**)&d_matrix_chunk, MATRIX_CHUNKSIZE*NUM_TIMESTEPS*sizeof(TYPE));
// CUBLAS block
{
cublasInit();
cublasAlloc( MATRIX_CHUNKSIZE, sizeof(TYPE), (void**) &d_ones_arr); //vector A
cublasAlloc( MATRIX_CHUNKSIZE, sizeof(TYPE), (void**) &d_row_sum); //vector B
// transfer host data to device for helper vector
cublasSetVector( MATRIX_CHUNKSIZE, sizeof(TYPE), ones_tmp, 1, d_ones_arr, 1);
start_timer();
// process each chunk
for(int i=0 ; i<numPasses ; ++i){
// get chunk indices
int startInd = i*MATRIX_CHUNKSIZE;
int endInd = startInd + ( (i+1==numPasses) ? (NUM_SAMPLES % MATRIX_CHUNKSIZE) : MATRIX_CHUNKSIZE ) - 1;
int numels = endInd - startInd + 1;
// transfer host matrix chunk to device
cudaMemcpy(d_matrix_chunk, &h_matrix[startInd*NUM_TIMESTEPS], numels*sizeof(TYPE)*NUM_TIMESTEPS, cudaMemcpyHostToDevice);
cudaMemset(d_row_sum, 0, NUM_TIMESTEPS * sizeof(TYPE));
// Perform matrix vector multiplication with cublas to obtain col sums
cublasSgemv('T', NUM_TIMESTEPS, numels, 1, d_matrix_chunk, NUM_TIMESTEPS, d_ones_arr, 1, 0, d_row_sum, 1);
// transfer device solution vector chunk to host
cudaMemcpy(&meta->h_sumX[startInd], d_row_sum, numels*sizeof(TYPE), cudaMemcpyDeviceToHost);
}
stop_timer("STEP1::CUBLASSGEMV",1);
cublasFree(d_ones_arr);
cublasFree(d_row_sum);
cublasShutdown();
}
cudaFree(d_matrix_chunk);
free(ones_tmp);
}
/**
 * Function to calculate sum of squares of individual time series by a matrix-vector
 * multiplication using cublas. The square operation is conducted using the thrust library.
 * Written as a separate function with almost the same code except for the thrust routines,
 * so that step1 and step2 can be fully parallelised across multiple GPUs
* or the ones that can launch concurrent kernels.
*
* @param meta Metadata structure for hyper parameters and stuff
*/
void step2_calculate_sumXX(METADATA* meta){
int MATRIX_CHUNKSIZE = meta->chunkSize;
int NUM_SAMPLES = meta->numSamples;
int NUM_TIMESTEPS = meta->numTimesteps;
// calculate number of passes
int numPasses = (NUM_SAMPLES / MATRIX_CHUNKSIZE)+(NUM_SAMPLES % MATRIX_CHUNKSIZE == 0 ? 0 : 1);
// allocate result vector on host
meta->h_sumXX = (TYPE*)malloc( NUM_SAMPLES*sizeof(TYPE) );
// allocate array and set to one for multiplication
TYPE* ones_tmp = (TYPE*)malloc( NUM_SAMPLES*sizeof(TYPE) );
for(int i=0 ; i<NUM_SAMPLES ; ++i)
ones_tmp[i]=1.0f;
TYPE *d_ones_arr, *d_row_sum, *d_matrix_chunk, *h_matrix;
h_matrix = meta->data;
// allocate chunk matrix on device
cudaMalloc((void**)&d_matrix_chunk, MATRIX_CHUNKSIZE*NUM_TIMESTEPS*sizeof(TYPE));
// CUBLAS block
{
cublasInit();
cublasAlloc( MATRIX_CHUNKSIZE, sizeof(TYPE), (void**) &d_ones_arr); //vector A
cublasAlloc( MATRIX_CHUNKSIZE, sizeof(TYPE), (void**) &d_row_sum); //vector B
// transfer host data to device for helper vector
cublasSetVector( MATRIX_CHUNKSIZE, sizeof(TYPE), ones_tmp, 1, d_ones_arr, 1);
start_timer();
// process each chunk
for(int i=0 ; i<numPasses ; ++i){
// get chunk indices
int startInd = i*MATRIX_CHUNKSIZE;
int endInd = startInd + ( (i+1==numPasses) ? (NUM_SAMPLES % MATRIX_CHUNKSIZE) : MATRIX_CHUNKSIZE ) - 1;
int numels = endInd - startInd + 1;
// transfer host matrix chunk to device
cudaMemcpy(d_matrix_chunk, &h_matrix[startInd*NUM_TIMESTEPS], numels*sizeof(TYPE)*NUM_TIMESTEPS, cudaMemcpyHostToDevice);
cudaMemset(d_row_sum, 0, NUM_TIMESTEPS * sizeof(TYPE));
// square matrix chunk using thrust
{
thrust::device_ptr<TYPE> dev_ptr1(d_matrix_chunk);
square<TYPE> unary_op;
thrust::transform(dev_ptr1, dev_ptr1+(numels*NUM_TIMESTEPS), dev_ptr1, unary_op);
}
// Perform matrix vector multiplication with cublas to obtain col sums
cublasSgemv('T', NUM_TIMESTEPS, numels, 1, d_matrix_chunk, NUM_TIMESTEPS, d_ones_arr, 1, 0, d_row_sum, 1);
// transfer device solution vector chunk to host
cudaMemcpy(&meta->h_sumXX[startInd], d_row_sum, numels*sizeof(TYPE), cudaMemcpyDeviceToHost);
}
stop_timer("STEP2::CUBLASSGEMV",2);
cublasFree(d_ones_arr);
cublasFree(d_row_sum);
cublasShutdown();
}
cudaFree(d_matrix_chunk);
free(ones_tmp);
}
/**
* Major function to calculate pearson-correlation coefficient using previous
* steps' results on device.
*
* @param meta Metadata structure for hyper parameters and stuff
*/
void step3_calculate_pearson_corr(METADATA* meta){
clock_t start, stop, total=0; // for writing output to disk
PARAMETERS params;
calculate_parameters(¶ms, meta);
int CHUNK_SIZE_IN_BYTES = params.tileSize*params.tileSize*sizeof(TYPE);
// allocate resulting matrix chunk on device and host
TYPE *d_matrix_chunk;
cudaMalloc((void**)&d_matrix_chunk, CHUNK_SIZE_IN_BYTES);
TYPE *h_matrix_chunk = (TYPE*)malloc( CHUNK_SIZE_IN_BYTES );
if(h_matrix_chunk==NULL){
printf("I cannot allocate any more please stop!\n");
exit(-1);
}
// transfer sum(Xi) and sum(square(Xi)) arrays to device-memory
// TODO try utilizing constant memory or texture memory
cudaMalloc((void**)&meta->d_sumX, sizeof(TYPE)*meta->numSamples);
cudaMalloc((void**)&meta->d_sumXX, sizeof(TYPE)*meta->numSamples);
Check_CUDA_Error("cudaMalloc");
cudaMemcpy(meta->d_sumX, meta->h_sumX, sizeof(TYPE)*meta->numSamples, cudaMemcpyHostToDevice);
cudaMemcpy(meta->d_sumXX, meta->h_sumXX, sizeof(TYPE)*meta->numSamples, cudaMemcpyHostToDevice);
Check_CUDA_Error("cudaMemcpyHostToDevice");
// allocate arrays for source and destination timeseries
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<TYPE>();
cudaArray *cu_array_src,*cu_array_dst; //TODO:these two can be merged
cudaMallocArray( &cu_array_src, &channelDesc, meta->numTimesteps , params.tileSize);
cudaMallocArray( &cu_array_dst, &channelDesc, meta->numTimesteps , params.tileSize);
Check_CUDA_Error("cudaMallocArray");
// set texture parameters
texSrc.filterMode = cudaFilterModePoint; texSrc.normalized = false;
texDst.filterMode = cudaFilterModePoint; texDst.normalized = false;
// configure grids and blocks
dim3 grid,block;
block.x = BLOCK_X;
block.y = BLOCK_Y;
grid.x = (int)(ceil((double)params.tileSize / (double)block.x));
grid.y = (int)(ceil((double)params.tileSize / (double)block.y));
// start computing correlation
for(int i=0,ctr=0 ; i<params.numPass ; ++i ){
// transfer source timeseries data and bind to texture
CUDA_SAFE_CALL(cudaMemcpy2DToArray( cu_array_src, 0, 0,
&meta->data[i*params.tileSize*meta->numTimesteps],
sizeof(TYPE)*meta->numTimesteps, sizeof(TYPE)*meta->numTimesteps,
( i+1==params.numPass ? (meta->numSamples%params.tileSize) : params.tileSize),
cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaBindTextureToArray( texSrc, cu_array_src, channelDesc));
for(int j=0 ; j<i+1 ; ++j ,++ctr){
start_timer();
// transfer destination timeseries data and bind to texture
CUDA_SAFE_CALL(cudaMemcpy2DToArray( cu_array_dst, 0, 0,
&meta->data[j*params.tileSize*meta->numTimesteps],
sizeof(TYPE)*meta->numTimesteps, sizeof(TYPE)*meta->numTimesteps,
((i+1==params.numPass && j+1==params.numPass) ? (meta->numSamples%params.tileSize) : params.tileSize),
cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaBindTextureToArray( texDst, cu_array_dst, channelDesc));
CUDA_SAFE_CALL(cudaMemset((void*)d_matrix_chunk, 0x0 , CHUNK_SIZE_IN_BYTES));
memset(h_matrix_chunk, 0x0 , CHUNK_SIZE_IN_BYTES);
/***********************
* start doing the job *
**********************/
kernel_pearson_corr<<<grid,block>>>(d_matrix_chunk, meta->d_sumX, meta->d_sumXX, params.tileSize, i, j, meta->numTimesteps, params.numPass);
Check_CUDA_Error("kernel_pearson_corr");
/***********************
* stop doing the job *
**********************/
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy(h_matrix_chunk, d_matrix_chunk, CHUNK_SIZE_IN_BYTES , cudaMemcpyDeviceToHost));
// unbind current texture
// TODO : try not unbinding
CUDA_SAFE_CALL(cudaUnbindTexture(texDst));
stop_timer("STEP3::CORR",3);
printf("numPass:%d curr:[%d][%d] ctr:%d\n",params.numPass,i,j,ctr);
fflush(stdout);
start = clock();
if(meta->isOutputBinary)
write_matChunk_to_binFile(h_matrix_chunk, params.tileSize, params.tileSize,i,j, meta->outputDir);
else // ascii-txt
write_matChunk_to_file(h_matrix_chunk, params.tileSize, params.tileSize,i,j, meta->outputDir);
stop = clock();
total += (float)(stop-start);
}
// unbind current texture
// TODO : try not unbinding
CUDA_SAFE_CALL(cudaUnbindTexture(texSrc));
}
set_host_timer("STEP4::WRITE_DATA",(float)(total),2);
// clean up
CUDA_SAFE_CALL(cudaFreeArray(cu_array_src));
CUDA_SAFE_CALL(cudaFreeArray(cu_array_dst));
CUDA_SAFE_CALL(cudaFree(d_matrix_chunk));
free(h_matrix_chunk);
}
|
d3d9630acdc7b604e2d41aedf08d5abaff06d1a0.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Cryptohaze GPU Rainbow Tables
Copyright (C) 2011 Bitweasil (http://www.cryptohaze.com/)
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
// CUDA SHA1 kernels for table generation.
// This is here so Netbeans doesn't error-spam my IDE
#if !defined(__HIPCC__)
// define the keywords, so that the IDE does not complain about them
#define __global__
#define __device__
#define __shared__
#define __constant__
#define blockIdx.x 1
#define blockDim.x 1
#define threadIdx.x 1
#endif
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <hip/hip_runtime_api.h>
#ifdef _WIN32
#include "windows/stdint.h"
#else
#include <stdint.h>
#endif
typedef uint32_t uint32_t;
// Some CUDA variables
__device__ __constant__ unsigned char SHA1_Generate_Device_Charset_Constant[512]; // Constant space for charset
__device__ __constant__ uint32_t SHA1_Generate_Device_Charset_Length; // Character set length
__device__ __constant__ uint32_t SHA1_Generate_Device_Chain_Length; // May as well pull it from constant memory... faster.
__device__ __constant__ uint32_t SHA1_Generate_Device_Number_Of_Chains; // Same, may as well be constant.
__device__ __constant__ uint32_t SHA1_Generate_Device_Table_Index;
__device__ __constant__ uint32_t SHA1_Generate_Device_Number_Of_Threads; // It needs this, and can't easily calculate it
#include "../../inc/CUDA_Common/CUDA_SHA1.h"
#include "../../inc/CUDA_Common/Hash_Common.h"
#include "../../inc/GRT_CUDA_device/CUDA_Reduction_Functions.h"
#include "../../inc/GRT_CUDA_device/CUDA_Load_Store_Registers.h"
#define CREATE_SHA1_GEN_KERNEL(length) \
__global__ void MakeSHA1ChainLen##length(unsigned char *InitialPasswordArray, unsigned char *OutputHashArray, \
uint32_t PasswordSpaceOffset, uint32_t StartChainIndex, uint32_t StepsToRun, uint32_t charset_offset) { \
const int pass_length = length; \
uint32_t CurrentStep, PassCount, password_index; \
uint32_t b0,b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15; \
uint32_t a,b,c,d,e; \
uint32_t *InitialArray32; \
uint32_t *OutputArray32; \
InitialArray32 = (uint32_t *)InitialPasswordArray; \
OutputArray32 = (uint32_t *)OutputHashArray; \
__shared__ char charset[512]; \
copySingleCharsetToShared(charset, SHA1_Generate_Device_Charset_Constant); \
password_index = ((blockIdx.x*blockDim.x + threadIdx.x) + (PasswordSpaceOffset * SHA1_Generate_Device_Number_Of_Threads)); \
if (password_index >= SHA1_Generate_Device_Number_Of_Chains) { \
return; \
} \
clearB0toB15(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15); \
LoadMD5RegistersFromGlobalMemory(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15, \
InitialArray32, SHA1_Generate_Device_Number_Of_Chains, password_index, pass_length); \
for (PassCount = 0; PassCount < StepsToRun; PassCount++) { \
CurrentStep = PassCount + StartChainIndex; \
b15 = ((pass_length * 8) & 0xff) << 24 | (((pass_length * 8) >> 8) & 0xff) << 16; \
SetCharacterAtPosition(0x80, pass_length, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 ); \
SHA_TRANSFORM(a, b, c, d, e, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
a = reverse(a);b = reverse(b);c = reverse(c);d = reverse(d);e = reverse(e); \
clearB0toB15(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15); \
reduceSingleCharsetNormal(b0, b1, b2, a, b, c, d, CurrentStep, charset, charset_offset, pass_length, SHA1_Generate_Device_Table_Index); \
charset_offset++; \
if (charset_offset >= SHA1_Generate_Device_Charset_Length) { \
charset_offset = 0; \
} \
} \
if (CurrentStep >= (SHA1_Generate_Device_Chain_Length - 1)) { \
OutputArray32[0 * SHA1_Generate_Device_Number_Of_Chains + password_index] = a; \
OutputArray32[1 * SHA1_Generate_Device_Number_Of_Chains + password_index] = b; \
OutputArray32[2 * SHA1_Generate_Device_Number_Of_Chains + password_index] = c; \
OutputArray32[3 * SHA1_Generate_Device_Number_Of_Chains + password_index] = d; \
OutputArray32[4 * SHA1_Generate_Device_Number_Of_Chains + password_index] = e; \
} \
else { \
SaveMD5RegistersIntoGlobalMemory(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15, \
InitialArray32, SHA1_Generate_Device_Number_Of_Chains, password_index, pass_length); \
} \
}
CREATE_SHA1_GEN_KERNEL(6)
CREATE_SHA1_GEN_KERNEL(7)
CREATE_SHA1_GEN_KERNEL(8)
CREATE_SHA1_GEN_KERNEL(9)
CREATE_SHA1_GEN_KERNEL(10)
extern "C" void copyConstantsToSHA1(unsigned char *HOST_Charset, uint32_t HOST_Charset_Length,
uint32_t HOST_Chain_Length, uint32_t HOST_Number_Of_Chains, uint32_t HOST_Table_Index,
uint32_t HOST_Number_Of_Threads) {
hipMemcpyToSymbol("SHA1_Generate_Device_Charset_Constant",HOST_Charset, 512);
hipMemcpyToSymbol("SHA1_Generate_Device_Charset_Length", &HOST_Charset_Length, sizeof(uint32_t));
// Copy general table parameters to constant space
hipMemcpyToSymbol("SHA1_Generate_Device_Chain_Length", &HOST_Chain_Length, sizeof(uint32_t));
hipMemcpyToSymbol("SHA1_Generate_Device_Number_Of_Chains", &HOST_Number_Of_Chains, sizeof(uint32_t));
hipMemcpyToSymbol("SHA1_Generate_Device_Table_Index", &HOST_Table_Index, sizeof(uint32_t));
hipMemcpyToSymbol("SHA1_Generate_Device_Number_Of_Threads", &HOST_Number_Of_Threads, sizeof(HOST_Number_Of_Threads));
}
extern "C" void LaunchGenerateKernelSHA1(int passwordLength, uint32_t CUDA_Blocks,
uint32_t CUDA_Threads, unsigned char *DEVICE_Initial_Passwords,
unsigned char *DEVICE_End_Hashes, uint32_t PasswordSpaceOffset,
uint32_t CurrentChainStartOffset, uint32_t StepsPerInvocation, uint32_t CharsetOffset) {
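    // Dispatch to the fixed-password-length kernel instantiation generated by
    // CREATE_SHA1_GEN_KERNEL above.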
switch (passwordLength) {
case 6:
hipLaunchKernelGGL(( MakeSHA1ChainLen6) , dim3(CUDA_Blocks), dim3(CUDA_Threads) , 0, 0,
DEVICE_Initial_Passwords, DEVICE_End_Hashes, PasswordSpaceOffset,
CurrentChainStartOffset, StepsPerInvocation, CharsetOffset);
break;
case 7:
hipLaunchKernelGGL(( MakeSHA1ChainLen7) , dim3(CUDA_Blocks), dim3(CUDA_Threads) , 0, 0,
DEVICE_Initial_Passwords, DEVICE_End_Hashes, PasswordSpaceOffset,
CurrentChainStartOffset, StepsPerInvocation, CharsetOffset);
break;
case 8:
hipLaunchKernelGGL(( MakeSHA1ChainLen8) , dim3(CUDA_Blocks), dim3(CUDA_Threads) , 0, 0,
DEVICE_Initial_Passwords, DEVICE_End_Hashes, PasswordSpaceOffset,
CurrentChainStartOffset, StepsPerInvocation, CharsetOffset);
break;
case 9:
hipLaunchKernelGGL(( MakeSHA1ChainLen9) , dim3(CUDA_Blocks), dim3(CUDA_Threads) , 0, 0,
DEVICE_Initial_Passwords, DEVICE_End_Hashes, PasswordSpaceOffset,
CurrentChainStartOffset, StepsPerInvocation, CharsetOffset);
break;
case 10:
hipLaunchKernelGGL(( MakeSHA1ChainLen10) , dim3(CUDA_Blocks), dim3(CUDA_Threads) , 0, 0,
DEVICE_Initial_Passwords, DEVICE_End_Hashes, PasswordSpaceOffset,
CurrentChainStartOffset, StepsPerInvocation, CharsetOffset);
break;
default:
printf("Password length %d not supported!", passwordLength);
exit(1);
}
}
|
d3d9630acdc7b604e2d41aedf08d5abaff06d1a0.cu
|
/*
Cryptohaze GPU Rainbow Tables
Copyright (C) 2011 Bitweasil (http://www.cryptohaze.com/)
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
// CUDA SHA1 kernels for table generation.
// This is here so Netbeans doesn't error-spam my IDE
#if !defined(__CUDACC__)
// define the keywords, so that the IDE does not complain about them
#define __global__
#define __device__
#define __shared__
#define __constant__
#define blockIdx.x 1
#define blockDim.x 1
#define threadIdx.x 1
#endif
#include <cuda.h>
#include <cutil.h>
#include <cuda_runtime_api.h>
#ifdef _WIN32
#include "windows/stdint.h"
#else
#include <stdint.h>
#endif
typedef uint32_t uint32_t;
// Some CUDA variables
__device__ __constant__ unsigned char SHA1_Generate_Device_Charset_Constant[512]; // Constant space for charset
__device__ __constant__ uint32_t SHA1_Generate_Device_Charset_Length; // Character set length
__device__ __constant__ uint32_t SHA1_Generate_Device_Chain_Length; // May as well pull it from constant memory... faster.
__device__ __constant__ uint32_t SHA1_Generate_Device_Number_Of_Chains; // Same, may as well be constant.
__device__ __constant__ uint32_t SHA1_Generate_Device_Table_Index;
__device__ __constant__ uint32_t SHA1_Generate_Device_Number_Of_Threads; // It needs this, and can't easily calculate it
#include "../../inc/CUDA_Common/CUDA_SHA1.h"
#include "../../inc/CUDA_Common/Hash_Common.h"
#include "../../inc/GRT_CUDA_device/CUDA_Reduction_Functions.h"
#include "../../inc/GRT_CUDA_device/CUDA_Load_Store_Registers.h"
#define CREATE_SHA1_GEN_KERNEL(length) \
__global__ void MakeSHA1ChainLen##length(unsigned char *InitialPasswordArray, unsigned char *OutputHashArray, \
uint32_t PasswordSpaceOffset, uint32_t StartChainIndex, uint32_t StepsToRun, uint32_t charset_offset) { \
const int pass_length = length; \
uint32_t CurrentStep, PassCount, password_index; \
uint32_t b0,b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15; \
uint32_t a,b,c,d,e; \
uint32_t *InitialArray32; \
uint32_t *OutputArray32; \
InitialArray32 = (uint32_t *)InitialPasswordArray; \
OutputArray32 = (uint32_t *)OutputHashArray; \
__shared__ char charset[512]; \
copySingleCharsetToShared(charset, SHA1_Generate_Device_Charset_Constant); \
password_index = ((blockIdx.x*blockDim.x + threadIdx.x) + (PasswordSpaceOffset * SHA1_Generate_Device_Number_Of_Threads)); \
if (password_index >= SHA1_Generate_Device_Number_Of_Chains) { \
return; \
} \
clearB0toB15(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15); \
LoadMD5RegistersFromGlobalMemory(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15, \
InitialArray32, SHA1_Generate_Device_Number_Of_Chains, password_index, pass_length); \
for (PassCount = 0; PassCount < StepsToRun; PassCount++) { \
CurrentStep = PassCount + StartChainIndex; \
b15 = ((pass_length * 8) & 0xff) << 24 | (((pass_length * 8) >> 8) & 0xff) << 16; \
SetCharacterAtPosition(0x80, pass_length, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 ); \
SHA_TRANSFORM(a, b, c, d, e, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
a = reverse(a);b = reverse(b);c = reverse(c);d = reverse(d);e = reverse(e); \
clearB0toB15(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15); \
reduceSingleCharsetNormal(b0, b1, b2, a, b, c, d, CurrentStep, charset, charset_offset, pass_length, SHA1_Generate_Device_Table_Index); \
charset_offset++; \
if (charset_offset >= SHA1_Generate_Device_Charset_Length) { \
charset_offset = 0; \
} \
} \
if (CurrentStep >= (SHA1_Generate_Device_Chain_Length - 1)) { \
OutputArray32[0 * SHA1_Generate_Device_Number_Of_Chains + password_index] = a; \
OutputArray32[1 * SHA1_Generate_Device_Number_Of_Chains + password_index] = b; \
OutputArray32[2 * SHA1_Generate_Device_Number_Of_Chains + password_index] = c; \
OutputArray32[3 * SHA1_Generate_Device_Number_Of_Chains + password_index] = d; \
OutputArray32[4 * SHA1_Generate_Device_Number_Of_Chains + password_index] = e; \
} \
else { \
SaveMD5RegistersIntoGlobalMemory(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15, \
InitialArray32, SHA1_Generate_Device_Number_Of_Chains, password_index, pass_length); \
} \
}
CREATE_SHA1_GEN_KERNEL(6)
CREATE_SHA1_GEN_KERNEL(7)
CREATE_SHA1_GEN_KERNEL(8)
CREATE_SHA1_GEN_KERNEL(9)
CREATE_SHA1_GEN_KERNEL(10)
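/*
 * Summary of the generated kernels (descriptive comment; the behavior is defined entirely by the
 * macro above): each thread loads its chain's current password into registers, then for StepsToRun
 * iterations it (1) writes the padding byte and the bit length into the message block, (2) runs
 * SHA_TRANSFORM and byte-reverses the resulting state, and (3) reduces the 160-bit hash back to a
 * password of the given length with reduceSingleCharsetNormal, rotating charset_offset each step.
 * When the chain end (SHA1_Generate_Device_Chain_Length - 1) is reached, the final hash is written
 * to OutputHashArray; otherwise the intermediate password is stored back so a later invocation can
 * continue the chain.
 */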
extern "C" void copyConstantsToSHA1(unsigned char *HOST_Charset, uint32_t HOST_Charset_Length,
uint32_t HOST_Chain_Length, uint32_t HOST_Number_Of_Chains, uint32_t HOST_Table_Index,
uint32_t HOST_Number_Of_Threads) {
cudaMemcpyToSymbol("SHA1_Generate_Device_Charset_Constant",HOST_Charset, 512);
cudaMemcpyToSymbol("SHA1_Generate_Device_Charset_Length", &HOST_Charset_Length, sizeof(uint32_t));
// Copy general table parameters to constant space
cudaMemcpyToSymbol("SHA1_Generate_Device_Chain_Length", &HOST_Chain_Length, sizeof(uint32_t));
cudaMemcpyToSymbol("SHA1_Generate_Device_Number_Of_Chains", &HOST_Number_Of_Chains, sizeof(uint32_t));
cudaMemcpyToSymbol("SHA1_Generate_Device_Table_Index", &HOST_Table_Index, sizeof(uint32_t));
cudaMemcpyToSymbol("SHA1_Generate_Device_Number_Of_Threads", &HOST_Number_Of_Threads, sizeof(HOST_Number_Of_Threads));
}
extern "C" void LaunchGenerateKernelSHA1(int passwordLength, uint32_t CUDA_Blocks,
uint32_t CUDA_Threads, unsigned char *DEVICE_Initial_Passwords,
unsigned char *DEVICE_End_Hashes, uint32_t PasswordSpaceOffset,
uint32_t CurrentChainStartOffset, uint32_t StepsPerInvocation, uint32_t CharsetOffset) {
switch (passwordLength) {
case 6:
MakeSHA1ChainLen6 <<< CUDA_Blocks, CUDA_Threads >>>
(DEVICE_Initial_Passwords, DEVICE_End_Hashes, PasswordSpaceOffset,
CurrentChainStartOffset, StepsPerInvocation, CharsetOffset);
break;
case 7:
MakeSHA1ChainLen7 <<< CUDA_Blocks, CUDA_Threads >>>
(DEVICE_Initial_Passwords, DEVICE_End_Hashes, PasswordSpaceOffset,
CurrentChainStartOffset, StepsPerInvocation, CharsetOffset);
break;
case 8:
MakeSHA1ChainLen8 <<< CUDA_Blocks, CUDA_Threads >>>
(DEVICE_Initial_Passwords, DEVICE_End_Hashes, PasswordSpaceOffset,
CurrentChainStartOffset, StepsPerInvocation, CharsetOffset);
break;
case 9:
MakeSHA1ChainLen9 <<< CUDA_Blocks, CUDA_Threads >>>
(DEVICE_Initial_Passwords, DEVICE_End_Hashes, PasswordSpaceOffset,
CurrentChainStartOffset, StepsPerInvocation, CharsetOffset);
break;
case 10:
MakeSHA1ChainLen10 <<< CUDA_Blocks, CUDA_Threads >>>
(DEVICE_Initial_Passwords, DEVICE_End_Hashes, PasswordSpaceOffset,
CurrentChainStartOffset, StepsPerInvocation, CharsetOffset);
break;
default:
printf("Password length %d not supported!", passwordLength);
exit(1);
}
}
|
1d279a888d3171ce13f494f2cb6a4726f5499c17.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Defines the basic matrix operations for the AIJ (compressed row)
matrix storage format using the CUSPARSE library,
*/
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <petsc/private/vecimpl.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
#include <thrust/adjacent_difference.h>
#if PETSC_CPP_VERSION >= 14
#define PETSC_HAVE_THRUST_ASYNC 1
// thrust::for_each(thrust::hip::par.on()) requires C++14
#include <thrust/async/for_each.h>
#endif
#include <thrust/iterator/constant_iterator.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
const char *const MatCUSPARSEStorageFormats[] = {"CSR", "ELL", "HYB", "MatCUSPARSEStorageFormat", "MAT_CUSPARSE_", 0};
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
/* The following are copied from cusparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc., we copy them in
0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them.
typedef enum {
HIPSPARSE_MV_ALG_DEFAULT = 0,
HIPSPARSE_COOMV_ALG = 1,
HIPSPARSE_CSRMV_ALG1 = 2,
HIPSPARSE_CSRMV_ALG2 = 3
} hipsparseSpMVAlg_t;
typedef enum {
HIPSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0,
HIPSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG1) = 1,
HIPSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG2) = 2,
HIPSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3,
HIPSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_CSRMM_ALG1) = 4,
CUSPARSE_SPMM_ALG_DEFAULT = 0,
HIPSPARSE_SPMM_COO_ALG1 = 1,
HIPSPARSE_SPMM_COO_ALG2 = 2,
CUSPARSE_SPMM_COO_ALG3 = 3,
CUSPARSE_SPMM_COO_ALG4 = 5,
HIPSPARSE_CSRMM_ALG1 = 4,
CUSPARSE_SPMM_CSR_ALG2 = 6,
} hipsparseSpMMAlg_t;
typedef enum {
HIPSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministic
HIPSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministic
} hipsparseCsr2CscAlg_t;
*/
const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT", "COOMV_ALG", "CSRMV_ALG1", "CSRMV_ALG2", "hipsparseSpMVAlg_t", "CUSPARSE_", 0};
const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT", "COO_ALG1", "COO_ALG2", "COO_ALG3", "CSR_ALG1", "COO_ALG4", "CSR_ALG2", "hipsparseSpMMAlg_t", "CUSPARSE_SPMM_", 0};
const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID" /*cusparse does not have enum 0! We created one*/, "ALG1", "ALG2", "hipsparseCsr2CscAlg_t", "CUSPARSE_CSR2CSC_", 0};
#endif
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *);
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *);
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat, Mat, const MatFactorInfo *);
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, IS, const MatFactorInfo *);
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat, Vec, Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **);
#endif
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat, PetscOptionItems *PetscOptionsObject);
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat, PetscScalar, Mat, MatStructure);
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat, PetscScalar);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat, Vec, Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec);
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec);
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec);
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec, PetscBool, PetscBool);
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **, MatCUSPARSEStorageFormat);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **);
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **);
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat, PetscBool);
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat, PetscInt, const PetscInt[], PetscScalar[]);
static PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat, PetscCount, PetscInt[], PetscInt[]);
static PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat, const PetscScalar[], InsertMode);
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSPARSE_MULT:
cusparsestruct->format = format;
break;
case MAT_CUSPARSE_ALL:
cusparsestruct->format = format;
break;
default:
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.", op);
}
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
MatCUSPARSESetFormat - Sets the storage format of `MATSEQCUSPARSE` matrices for a particular
operation. Only the `MatMult()` operation can use different GPU storage formats
Not Collective
Input Parameters:
+ A - Matrix of type `MATSEQAIJCUSPARSE`
. op - `MatCUSPARSEFormatOperation`. `MATSEQAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT` and `MAT_CUSPARSE_ALL`.
`MATMPIAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT_DIAG`,`MAT_CUSPARSE_MULT_OFFDIAG`, and `MAT_CUSPARSE_ALL`.
- format - `MatCUSPARSEStorageFormat` (one of `MAT_CUSPARSE_CSR`, `MAT_CUSPARSE_ELL`, `MAT_CUSPARSE_HYB`.)
Level: intermediate
.seealso: [](chapter_matrices), `Mat`, `Mat`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation`
@*/
PetscErrorCode MatCUSPARSESetFormat(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscTryMethod(A, "MatCUSPARSESetFormat_C", (Mat, MatCUSPARSEFormatOperation, MatCUSPARSEStorageFormat), (A, op, format));
PetscFunctionReturn(PETSC_SUCCESS);
}
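/* Example usage (a sketch, not taken from this file): after setting the matrix type, one might select
   the ELL kernel for MatMult() only, either in code

     PetscCall(MatSetType(A, MATSEQAIJCUSPARSE));
     PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_MULT, MAT_CUSPARSE_ELL));

   or through the -mat_cusparse_mult_storage_format option handled in MatSetFromOptions_SeqAIJCUSPARSE() below. */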
PETSC_INTERN PetscErrorCode MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE(Mat A, PetscBool use_cpu)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscFunctionBegin;
cusparsestruct->use_cpu_solve = use_cpu;
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
MatCUSPARSESetUseCPUSolve - Sets to use CPU `MatSolve()`.
Input Parameters:
+ A - Matrix of type `MATSEQAIJCUSPARSE`
- use_cpu - set flag for using the built-in CPU `MatSolve()`
Level: intermediate
Note:
The cuSparse LU solver currently computes the factors with the built-in CPU method
and moves the factors to the GPU for the solve. We have observed better performance keeping the data on the CPU and computing the solve there.
This method specifies whether the solve is done on the CPU or the GPU (the GPU is the default).
.seealso: [](chapter_matrices), `Mat`, `MatSolve()`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation`
@*/
PetscErrorCode MatCUSPARSESetUseCPUSolve(Mat A, PetscBool use_cpu)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscTryMethod(A, "MatCUSPARSESetUseCPUSolve_C", (Mat, PetscBool), (A, use_cpu));
PetscFunctionReturn(PETSC_SUCCESS);
}
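/* Example usage (a sketch): to keep the triangular solves on the CPU while leaving the multiply on the GPU,
   one might call

     PetscCall(MatCUSPARSESetUseCPUSolve(A, PETSC_TRUE));

   or pass -mat_cusparse_use_cpu_solve on the command line, which is parsed in MatSetFromOptions_SeqAIJCUSPARSE() below. */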
PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A, MatOption op, PetscBool flg)
{
PetscFunctionBegin;
switch (op) {
case MAT_FORM_EXPLICIT_TRANSPOSE:
/* need to destroy the transpose matrix if present to prevent logic errors if flg is set to true later */
if (A->form_explicit_transpose && !flg) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE));
A->form_explicit_transpose = flg;
break;
default:
PetscCall(MatSetOption_SeqAIJ(A, op, flg));
break;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat A, PetscOptionItems *PetscOptionsObject)
{
MatCUSPARSEStorageFormat format;
PetscBool flg;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscFunctionBegin;
PetscOptionsHeadBegin(PetscOptionsObject, "SeqAIJCUSPARSE options");
if (A->factortype == MAT_FACTOR_NONE) {
PetscCall(PetscOptionsEnum("-mat_cusparse_mult_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg));
if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_MULT, format));
PetscCall(PetscOptionsEnum("-mat_cusparse_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg));
if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_ALL, format));
PetscCall(PetscOptionsBool("-mat_cusparse_use_cpu_solve", "Use CPU (I)LU solve", "MatCUSPARSESetUseCPUSolve", cusparsestruct->use_cpu_solve, &cusparsestruct->use_cpu_solve, &flg));
if (flg) PetscCall(MatCUSPARSESetUseCPUSolve(A, cusparsestruct->use_cpu_solve));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCall(PetscOptionsEnum("-mat_cusparse_spmv_alg", "sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)", "hipsparseSpMVAlg_t", MatCUSPARSESpMVAlgorithms, (PetscEnum)cusparsestruct->spmvAlg, (PetscEnum *)&cusparsestruct->spmvAlg, &flg));
/* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscCheck(!flg || CUSPARSE_SPMV_CSR_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
#else
PetscCheck(!flg || HIPSPARSE_CSRMV_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
#endif
PetscCall(PetscOptionsEnum("-mat_cusparse_spmm_alg", "sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)", "hipsparseSpMMAlg_t", MatCUSPARSESpMMAlgorithms, (PetscEnum)cusparsestruct->spmmAlg, (PetscEnum *)&cusparsestruct->spmmAlg, &flg));
PetscCheck(!flg || HIPSPARSE_CSRMM_ALG1 == 4, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum hipsparseSpMMAlg_t has been changed but PETSc has not been updated accordingly");
PetscCall(
PetscOptionsEnum("-mat_cusparse_csr2csc_alg", "sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices", "hipsparseCsr2CscAlg_t", MatCUSPARSECsr2CscAlgorithms, (PetscEnum)cusparsestruct->csr2cscAlg, (PetscEnum *)&cusparsestruct->csr2cscAlg, &flg));
PetscCheck(!flg || HIPSPARSE_CSR2CSC_ALG1 == 1, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum hipsparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly");
#endif
}
PetscOptionsHeadEnd();
PetscFunctionReturn(PETSC_SUCCESS);
}
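/* Illustrative command line exercising the options registered above (the value spellings come from the
   MatCUSPARSEStorageFormats / MatCUSPARSESpMVAlgorithms arrays near the top of this file; treat this as
   a sketch rather than a tested invocation):
     -mat_cusparse_storage_format HYB -mat_cusparse_spmv_alg CSRMV_ALG2 -mat_cusparse_use_cpu_solve */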
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
static PetscErrorCode MatSeqAIJCUSPARSEBuildFactoredMatrix_LU(Mat A)
{
Mat_SeqAIJ *a = static_cast<Mat_SeqAIJ *>(A->data);
PetscInt m = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
const PetscInt *Ai = a->i, *Aj = a->j, *Adiag = a->diag;
const MatScalar *Aa = a->a;
PetscInt *Mi, *Mj, Mnz;
PetscScalar *Ma;
PetscFunctionBegin;
if (A->offloadmask == PETSC_OFFLOAD_CPU) { // A's latest factors are on CPU
if (!fs->csrRowPtr) { // Is it the first time doing the setup? Use csrRowPtr since it is not null even when m=0
// Re-arrange the (skewed) factored matrix and put the result into M, a regular csr matrix on host
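// Descriptive note (summarizing the loop below): row i of M is laid out as
//   [ the llen entries of L(i,:) excluding the unit diagonal | the diagonal index i | the ulen-1 entries of U(i,:) right of the diagonal ]
// so M holds L and U merged into one ordinary CSR matrix whose row pointers are Mi.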
Mnz = (Ai[m] - Ai[0]) + (Adiag[0] - Adiag[m]); // Lnz (without the unit diagonal) + Unz (with the non-unit diagonal)
PetscCall(PetscMalloc1(m + 1, &Mi));
PetscCall(PetscMalloc1(Mnz, &Mj)); // Mj is temp
PetscCall(PetscMalloc1(Mnz, &Ma));
Mi[0] = 0;
for (PetscInt i = 0; i < m; i++) {
PetscInt llen = Ai[i + 1] - Ai[i];
PetscInt ulen = Adiag[i] - Adiag[i + 1];
PetscCall(PetscArraycpy(Mj + Mi[i], Aj + Ai[i], llen)); // entries of L
Mj[Mi[i] + llen] = i; // diagonal entry
PetscCall(PetscArraycpy(Mj + Mi[i] + llen + 1, Aj + Adiag[i + 1] + 1, ulen - 1)); // entries of U on the right of the diagonal
Mi[i + 1] = Mi[i] + llen + ulen;
}
// Copy M (L,U) from host to device
PetscCallCUDA(hipMalloc(&fs->csrRowPtr, sizeof(*(fs->csrRowPtr)) * (m + 1)));
PetscCallCUDA(hipMalloc(&fs->csrColIdx, sizeof(*(fs->csrColIdx)) * Mnz));
PetscCallCUDA(hipMalloc(&fs->csrVal, sizeof(*(fs->csrVal)) * Mnz));
PetscCallCUDA(hipMemcpy(fs->csrRowPtr, Mi, sizeof(*(fs->csrRowPtr)) * (m + 1), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(fs->csrColIdx, Mj, sizeof(*(fs->csrColIdx)) * Mnz, hipMemcpyHostToDevice));
// Create descriptors for L, U. See https://docs.nvidia.com/cuda/cusparse/index.html#hipsparseDiagType_t
// hipsparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
// assumed to be present, but if HIPSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
// all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
// assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
hipsparseFillMode_t fillMode = HIPSPARSE_FILL_MODE_LOWER;
hipsparseDiagType_t diagType = HIPSPARSE_DIAG_TYPE_UNIT;
const hipsparseIndexType_t indexType = PetscDefined(USE_64BIT_INDICES) ? HIPSPARSE_INDEX_64I : HIPSPARSE_INDEX_32I;
PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_L, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
fillMode = HIPSPARSE_FILL_MODE_UPPER;
diagType = HIPSPARSE_DIAG_TYPE_NON_UNIT;
PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_U, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
// Allocate work vectors in SpSV
PetscCallCUDA(hipMalloc((void **)&fs->X, sizeof(*(fs->X)) * m));
PetscCallCUDA(hipMalloc((void **)&fs->Y, sizeof(*(fs->Y)) * m));
PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));
// Query buffer sizes for SpSV and then allocate buffers, temporarily assuming opA = HIPSPARSE_OPERATION_NON_TRANSPOSE
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U));
PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U));
PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L));
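// Note on the cuSPARSE generic SpSV workflow used here: cusparseSpSV_createDescr() -> cusparseSpSV_bufferSize()
// -> allocate the buffer -> cusparseSpSV_analysis() (done further below, once the values are valid)
// -> cusparseSpSV_solve() at MatSolve() time. The analysis step is numeric, so it must be repeated
// whenever the factor values change.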
// Record for reuse
fs->csrRowPtr_h = Mi;
fs->csrVal_h = Ma;
PetscCall(PetscFree(Mj));
}
// Copy the value
Mi = fs->csrRowPtr_h;
Ma = fs->csrVal_h;
Mnz = Mi[m];
for (PetscInt i = 0; i < m; i++) {
PetscInt llen = Ai[i + 1] - Ai[i];
PetscInt ulen = Adiag[i] - Adiag[i + 1];
PetscCall(PetscArraycpy(Ma + Mi[i], Aa + Ai[i], llen)); // entries of L
Ma[Mi[i] + llen] = (MatScalar)1.0 / Aa[Adiag[i]]; // recover the diagonal entry
PetscCall(PetscArraycpy(Ma + Mi[i] + llen + 1, Aa + Adiag[i + 1] + 1, ulen - 1)); // entries of U on the right of the diagonal
}
PetscCallCUDA(hipMemcpy(fs->csrVal, Ma, sizeof(*Ma) * Mnz, hipMemcpyHostToDevice));
// Do cusparseSpSV_analysis(), which is numeric and requires valid and up-to-date matrix values
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L));
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U));
// L, U values have changed, reset the flag to indicate we need to redo cusparseSpSV_analysis() for transpose solve
fs->updatedTransposeSpSVAnalysis = PETSC_FALSE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#else
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
const PetscInt *ai = a->i, *aj = a->j, *vi;
const MatScalar *aa = a->a, *v;
PetscInt *AiLo, *AjLo;
PetscInt i, nz, nzLower, offset, rowOffset;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(PETSC_SUCCESS);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
nzLower = n + ai[n] - ai[1];
if (!loTriFactor) {
PetscScalar *AALo;
PetscCallCUDA(hipHostMalloc((void **)&AALo, nzLower * sizeof(PetscScalar)));
/* Allocate Space for the lower triangular matrix */
PetscCallCUDA(hipHostMalloc((void **)&AiLo, (n + 1) * sizeof(PetscInt)));
PetscCallCUDA(hipHostMalloc((void **)&AjLo, nzLower * sizeof(PetscInt)));
/* Fill the lower triangular matrix */
AiLo[0] = (PetscInt)0;
AiLo[n] = nzLower;
AjLo[0] = (PetscInt)0;
AALo[0] = (MatScalar)1.0;
v = aa;
vi = aj;
offset = 1;
rowOffset = 1;
for (i = 1; i < n; i++) {
nz = ai[i + 1] - ai[i];
/* additional 1 for the term on the diagonal */
AiLo[i] = rowOffset;
rowOffset += nz + 1;
PetscCall(PetscArraycpy(&(AjLo[offset]), vi, nz));
PetscCall(PetscArraycpy(&(AALo[offset]), v, nz));
offset += nz;
AjLo[offset] = (PetscInt)i;
AALo[offset] = (MatScalar)1.0;
offset += 1;
v += nz;
vi += nz;
}
/* allocate space for the triangular factor information */
PetscCall(PetscNew(&loTriFactor));
loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
PetscCallCUSPARSE(hipsparseCreateMatDescr(&loTriFactor->descr));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
#else
PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
PetscCallCUSPARSE(hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_LOWER));
PetscCallCUSPARSE(hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT));
/* set the operation */
loTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = n;
loTriFactor->csrMat->num_cols = n;
loTriFactor->csrMat->num_entries = nzLower;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1);
loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo + n + 1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
loTriFactor->csrMat->column_indices->assign(AjLo, AjLo + nzLower);
loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
loTriFactor->csrMat->values->assign(AALo, AALo + nzLower);
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize));
PetscCallCUDA(hipMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize));
#endif
/* perform the solve analysis */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor;
loTriFactor->AA_h = AALo;
PetscCallCUDA(hipHostFree(AiLo));
PetscCallCUDA(hipHostFree(AjLo));
PetscCall(PetscLogCpuToGpu((n + 1 + nzLower) * sizeof(int) + nzLower * sizeof(PetscScalar)));
} else { /* update values only */
if (!loTriFactor->AA_h) PetscCallCUDA(hipHostMalloc((void **)&loTriFactor->AA_h, nzLower * sizeof(PetscScalar)));
/* Fill the lower triangular matrix */
loTriFactor->AA_h[0] = 1.0;
v = aa;
vi = aj;
offset = 1;
for (i = 1; i < n; i++) {
nz = ai[i + 1] - ai[i];
PetscCall(PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz));
offset += nz;
loTriFactor->AA_h[offset] = 1.0;
offset += 1;
v += nz;
}
loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h + nzLower);
PetscCall(PetscLogCpuToGpu(nzLower * sizeof(PetscScalar)));
}
} catch (char *ex) {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
const PetscInt *aj = a->j, *adiag = a->diag, *vi;
const MatScalar *aa = a->a, *v;
PetscInt *AiUp, *AjUp;
PetscInt i, nz, nzUpper, offset;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(PETSC_SUCCESS);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* next, figure out the number of nonzeros in the upper triangular matrix. */
nzUpper = adiag[0] - adiag[n];
if (!upTriFactor) {
PetscScalar *AAUp;
PetscCallCUDA(hipHostMalloc((void **)&AAUp, nzUpper * sizeof(PetscScalar)));
/* Allocate Space for the upper triangular matrix */
PetscCallCUDA(hipHostMalloc((void **)&AiUp, (n + 1) * sizeof(PetscInt)));
PetscCallCUDA(hipHostMalloc((void **)&AjUp, nzUpper * sizeof(PetscInt)));
/* Fill the upper triangular matrix */
AiUp[0] = (PetscInt)0;
AiUp[n] = nzUpper;
offset = nzUpper;
for (i = n - 1; i >= 0; i--) {
v = aa + adiag[i + 1] + 1;
vi = aj + adiag[i + 1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i + 1] - 1;
/* decrement the offset */
offset -= (nz + 1);
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt)i;
AAUp[offset] = (MatScalar)1. / v[nz];
AiUp[i] = AiUp[i + 1] - (nz + 1);
PetscCall(PetscArraycpy(&(AjUp[offset + 1]), vi, nz));
PetscCall(PetscArraycpy(&(AAUp[offset + 1]), v, nz));
}
/* allocate space for the triangular factor information */
PetscCall(PetscNew(&upTriFactor));
upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
PetscCallCUSPARSE(hipsparseCreateMatDescr(&upTriFactor->descr));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
#else
PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
PetscCallCUSPARSE(hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER));
PetscCallCUSPARSE(hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT));
/* set the operation */
upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = n;
upTriFactor->csrMat->num_cols = n;
upTriFactor->csrMat->num_entries = nzUpper;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + n + 1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + nzUpper);
upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
upTriFactor->csrMat->values->assign(AAUp, AAUp + nzUpper);
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize));
PetscCallCUDA(hipMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize));
#endif
/* perform the solve analysis */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor;
upTriFactor->AA_h = AAUp;
PetscCallCUDA(hipHostFree(AiUp));
PetscCallCUDA(hipHostFree(AjUp));
PetscCall(PetscLogCpuToGpu((n + 1 + nzUpper) * sizeof(int) + nzUpper * sizeof(PetscScalar)));
} else {
if (!upTriFactor->AA_h) PetscCallCUDA(hipHostMalloc((void **)&upTriFactor->AA_h, nzUpper * sizeof(PetscScalar)));
/* Fill the upper triangular matrix */
offset = nzUpper;
for (i = n - 1; i >= 0; i--) {
v = aa + adiag[i + 1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i + 1] - 1;
/* decrement the offset */
offset -= (nz + 1);
/* first, set the diagonal elements */
upTriFactor->AA_h[offset] = 1. / v[nz];
PetscCall(PetscArraycpy(&(upTriFactor->AA_h[offset + 1]), v, nz));
}
upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h + nzUpper);
PetscCall(PetscLogCpuToGpu(nzUpper * sizeof(PetscScalar)));
}
} catch (char *ex) {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
IS isrow = a->row, iscol = a->icol;
PetscBool row_identity, col_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscCall(MatSeqAIJCUSPARSEBuildFactoredMatrix_LU(A));
#else
PetscCall(MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A));
PetscCall(MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A));
if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n);
#endif
cusparseTriFactors->nnz = a->nz;
A->offloadmask = PETSC_OFFLOAD_BOTH; // factored matrix is sync'ed to GPU
/* lower triangular indices */
PetscCall(ISIdentity(isrow, &row_identity));
if (!row_identity && !cusparseTriFactors->rpermIndices) {
const PetscInt *r;
PetscCall(ISGetIndices(isrow, &r));
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(r, r + n);
PetscCall(ISRestoreIndices(isrow, &r));
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
}
/* upper triangular indices */
PetscCall(ISIdentity(iscol, &col_identity));
if (!col_identity && !cusparseTriFactors->cpermIndices) {
const PetscInt *c;
PetscCall(ISGetIndices(iscol, &c));
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(c, c + n);
PetscCall(ISRestoreIndices(iscol, &c));
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
static PetscErrorCode MatSeqAIJCUSPARSEBuildFactoredMatrix_Cheolesky(Mat A)
{
Mat_SeqAIJ *a = static_cast<Mat_SeqAIJ *>(A->data);
PetscInt m = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
const PetscInt *Ai = a->i, *Aj = a->j, *Adiag = a->diag;
const MatScalar *Aa = a->a;
PetscInt *Mj, Mnz;
PetscScalar *Ma, *D;
PetscFunctionBegin;
if (A->offloadmask == PETSC_OFFLOAD_CPU) { // A's latest factors are on CPU
if (!fs->csrRowPtr) { // Is it the first time doing the setup? Use csrRowPtr since it is not null even when m=0
// Re-arrange the (skewed) factored matrix and put the result into M, a regular csr matrix on host.
// See comments at MatICCFactorSymbolic_SeqAIJ() on the layout of the factored matrix (U) on host.
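// Descriptive note (summarizing the loops below): row i of the device matrix M gets an explicit unit
// diagonal at column i followed by the ulen-1 strictly upper column indices copied from Aj; the actual
// diagonal values (already stored inverted by the host factorization) go into the separate array
// fs->diag, and the strictly upper values are negated when copied.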
Mnz = Ai[m]; // Unz (with the unit diagonal)
PetscCall(PetscMalloc1(Mnz, &Ma));
PetscCall(PetscMalloc1(Mnz, &Mj)); // Mj[] is temp
PetscCall(PetscMalloc1(m, &D)); // the diagonal
for (PetscInt i = 0; i < m; i++) {
PetscInt ulen = Ai[i + 1] - Ai[i];
Mj[Ai[i]] = i; // diagonal entry
PetscCall(PetscArraycpy(Mj + Ai[i] + 1, Aj + Ai[i], ulen - 1)); // entries of U on the right of the diagonal
}
// Copy M (U) from host to device
PetscCallCUDA(hipMalloc(&fs->csrRowPtr, sizeof(*(fs->csrRowPtr)) * (m + 1)));
PetscCallCUDA(hipMalloc(&fs->csrColIdx, sizeof(*(fs->csrColIdx)) * Mnz));
PetscCallCUDA(hipMalloc(&fs->csrVal, sizeof(*(fs->csrVal)) * Mnz));
PetscCallCUDA(hipMalloc(&fs->diag, sizeof(*(fs->diag)) * m));
PetscCallCUDA(hipMemcpy(fs->csrRowPtr, Ai, sizeof(*Ai) * (m + 1), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(fs->csrColIdx, Mj, sizeof(*Mj) * Mnz, hipMemcpyHostToDevice));
// Create the descriptor for U. See https://docs.nvidia.com/cuda/cusparse/index.html#hipsparseDiagType_t
// hipsparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
// assumed to be present, but if HIPSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
// all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
// assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
hipsparseFillMode_t fillMode = HIPSPARSE_FILL_MODE_UPPER;
hipsparseDiagType_t diagType = HIPSPARSE_DIAG_TYPE_UNIT; // U is unit diagonal
const hipsparseIndexType_t indexType = PetscDefined(USE_64BIT_INDICES) ? HIPSPARSE_INDEX_64I : HIPSPARSE_INDEX_32I;
PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_U, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
// Allocate work vectors in SpSV
PetscCallCUDA(hipMalloc((void **)&fs->X, sizeof(*(fs->X)) * m));
PetscCallCUDA(hipMalloc((void **)&fs->Y, sizeof(*(fs->Y)) * m));
PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));
// Query buffer sizes for SpSV and then allocate buffers
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U));
PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Ut)); // Ut solve uses the same matrix (spMatDescr_U), but different descr and buffer
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, &fs->spsvBufferSize_Ut));
PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_Ut, fs->spsvBufferSize_Ut));
// Record for reuse
fs->csrVal_h = Ma;
fs->diag_h = D;
PetscCall(PetscFree(Mj));
}
// Copy the value
Ma = fs->csrVal_h;
D = fs->diag_h;
Mnz = Ai[m];
for (PetscInt i = 0; i < m; i++) {
D[i] = Aa[Adiag[i]]; // actually Aa[Adiag[i]] is the inverse of the diagonal
Ma[Ai[i]] = (MatScalar)1.0; // set the unit diagonal, which is cosmetic since cusparse does not really read it given HIPSPARSE_DIAG_TYPE_UNIT
for (PetscInt k = 0; k < Ai[i + 1] - Ai[i] - 1; k++) Ma[Ai[i] + 1 + k] = -Aa[Ai[i] + k];
}
PetscCallCUDA(hipMemcpy(fs->csrVal, Ma, sizeof(*Ma) * Mnz, hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(fs->diag, D, sizeof(*D) * m, hipMemcpyHostToDevice));
// Do cusparseSpSV_analysis(), which is numeric and requires valid and up-to-date matrix values
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U));
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, fs->spsvBuffer_Ut));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
// Solve Ut D U x = b
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_Cholesky(Mat A, Vec b, Vec x)
{
Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
Mat_SeqAIJ *aij = static_cast<Mat_SeqAIJ *>(A->data);
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
const cusparseSpSVAlg_t alg = CUSPARSE_SPSV_ALG_DEFAULT;
PetscInt m = A->rmap->n;
PetscFunctionBegin;
PetscCall(PetscLogGpuTimeBegin());
PetscCall(VecCUDAGetArrayWrite(x, &xarray));
PetscCall(VecCUDAGetArrayRead(b, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
// Reorder b with the row permutation if needed, and wrap the result in fs->X
if (fs->rpermIndices) {
PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X)));
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
}
// Solve Ut Y = X
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut));
// Solve diag(D) Z = Y. Actually just do Y = Y*D since D is already inverted in MatCholeskyFactorNumeric_SeqAIJ().
// It is basically a vector element-wise multiplication, but cublas does not have it!
PetscCallThrust(thrust::transform(thrust::hip::par.on(PetscDefaultCudaStream), thrust::device_pointer_cast(fs->Y), thrust::device_pointer_cast(fs->Y + m), thrust::device_pointer_cast(fs->diag), thrust::device_pointer_cast(fs->Y), thrust::multiplies<PetscScalar>()));
// Solve U X = Y
if (fs->cpermIndices) { // if need to permute, we need to use the intermediate buffer X
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, xarray));
}
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_U));
// Reorder X with the column permutation if needed, and put the result back to x
if (fs->cpermIndices) {
PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()),
thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU));
}
PetscCall(VecCUDARestoreArrayRead(b, &barray));
PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(4.0 * aij->nz - A->rmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
#else
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
PetscInt *AiUp, *AjUp;
PetscScalar *AAUp;
PetscScalar *AALo;
PetscInt nzUpper = a->nz, n = A->rmap->n, i, offset, nz, j;
Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ *)A->data;
const PetscInt *ai = b->i, *aj = b->j, *vj;
const MatScalar *aa = b->a, *v;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(PETSC_SUCCESS);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
PetscCallCUDA(hipHostMalloc((void **)&AAUp, nzUpper * sizeof(PetscScalar)));
PetscCallCUDA(hipHostMalloc((void **)&AALo, nzUpper * sizeof(PetscScalar)));
if (!upTriFactor && !loTriFactor) {
/* Allocate Space for the upper triangular matrix */
PetscCallCUDA(hipHostMalloc((void **)&AiUp, (n + 1) * sizeof(PetscInt)));
PetscCallCUDA(hipHostMalloc((void **)&AjUp, nzUpper * sizeof(PetscInt)));
/* Fill the upper triangular matrix */
AiUp[0] = (PetscInt)0;
AiUp[n] = nzUpper;
offset = 0;
for (i = 0; i < n; i++) {
/* set the pointers */
v = aa + ai[i];
vj = aj + ai[i];
nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt)i;
AAUp[offset] = (MatScalar)1.0 / v[nz];
AiUp[i] = offset;
AALo[offset] = (MatScalar)1.0 / v[nz];
offset += 1;
if (nz > 0) {
PetscCall(PetscArraycpy(&(AjUp[offset]), vj, nz));
PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz));
for (j = offset; j < offset + nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j] / v[nz];
}
offset += nz;
}
}
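/* Summary of the fill above: each row stores its reciprocal diagonal first; the strictly upper values are
   negated for the upper factor (AAUp) and additionally scaled by the reciprocal diagonal for the lower
   factor (AALo), which is solved with HIPSPARSE_OPERATION_TRANSPOSE below. Both factors share the same
   AiUp/AjUp sparsity pattern. */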
/* allocate space for the triangular factor information */
PetscCall(PetscNew(&upTriFactor));
upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
PetscCallCUSPARSE(hipsparseCreateMatDescr(&upTriFactor->descr));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
#else
PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
PetscCallCUSPARSE(hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER));
PetscCallCUSPARSE(hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT));
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = A->rmap->n;
upTriFactor->csrMat->num_cols = A->cmap->n;
upTriFactor->csrMat->num_entries = a->nz;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz);
upTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz);
/* set the operation */
upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize));
PetscCallCUDA(hipMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize));
#endif
/* perform the solve analysis */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor;
/* allocate space for the triangular factor information */
PetscCall(PetscNew(&loTriFactor));
loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
PetscCallCUSPARSE(hipsparseCreateMatDescr(&loTriFactor->descr));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
#else
PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
PetscCallCUSPARSE(hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER));
PetscCallCUSPARSE(hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT));
/* set the operation */
loTriFactor->solveOp = HIPSPARSE_OPERATION_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = A->rmap->n;
loTriFactor->csrMat->num_cols = A->cmap->n;
loTriFactor->csrMat->num_entries = a->nz;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
loTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz);
loTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo + a->nz);
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize));
PetscCallCUDA(hipMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize));
#endif
/* perform the solve analysis */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor;
PetscCall(PetscLogCpuToGpu(2 * (((A->rmap->n + 1) + (a->nz)) * sizeof(int) + (a->nz) * sizeof(PetscScalar))));
PetscCallCUDA(hipHostFree(AiUp));
PetscCallCUDA(hipHostFree(AjUp));
} else {
/* Fill the upper triangular matrix */
offset = 0;
for (i = 0; i < n; i++) {
/* set the pointers */
v = aa + ai[i];
nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AAUp[offset] = 1.0 / v[nz];
AALo[offset] = 1.0 / v[nz];
offset += 1;
if (nz > 0) {
PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz));
for (j = offset; j < offset + nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j] / v[nz];
}
offset += nz;
}
}
PetscCheck(upTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
PetscCheck(loTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo + a->nz);
PetscCall(PetscLogCpuToGpu(2 * (a->nz) * sizeof(PetscScalar)));
}
PetscCallCUDA(hipHostFree(AAUp));
PetscCallCUDA(hipHostFree(AALo));
} catch (char *ex) {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
IS ip = a->row;
PetscBool perm_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscCall(MatSeqAIJCUSPARSEBuildFactoredMatrix_Cheolesky(A));
#else
PetscCall(MatSeqAIJCUSPARSEBuildICCTriMatrices(A));
if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n);
#endif
cusparseTriFactors->nnz = (a->nz - n) * 2 + n;
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
PetscCall(ISIdentity(ip, &perm_identity));
if (!perm_identity) {
IS iip;
const PetscInt *irip, *rip;
PetscCall(ISInvertPermutation(ip, PETSC_DECIDE, &iip));
PetscCall(ISGetIndices(iip, &irip));
PetscCall(ISGetIndices(ip, &rip));
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(rip, rip + n);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(irip, irip + n);
PetscCall(ISRestoreIndices(iip, &irip));
PetscCall(ISDestroy(&iip));
PetscCall(ISRestoreIndices(ip, &rip));
PetscCall(PetscLogCpuToGpu(2. * n * sizeof(PetscInt)));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info)
{
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
PetscCall(MatCholeskyFactorNumeric_SeqAIJ(B, A, info));
B->offloadmask = PETSC_OFFLOAD_CPU;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
B->ops->solve = MatSolve_SeqAIJCUSPARSE_Cholesky;
B->ops->solvetranspose = MatSolve_SeqAIJCUSPARSE_Cholesky;
#else
/* determine which version of MatSolve needs to be used. */
Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
IS ip = b->row;
PetscBool perm_identity;
PetscCall(ISIdentity(ip, &perm_identity));
if (perm_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
}
#endif
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
/* get the triangular factors */
PetscCall(MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B));
PetscFunctionReturn(PETSC_SUCCESS);
}
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT;
hipsparseIndexBase_t indexBase;
hipsparseMatrixType_t matrixType;
hipsparseFillMode_t fillMode;
hipsparseDiagType_t diagType;
PetscFunctionBegin;
/* allocate space for the transpose of the lower triangular factor */
PetscCall(PetscNew(&loTriFactorT));
loTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the lower triangular factor */
matrixType = cusparseGetMatType(loTriFactor->descr);
indexBase = cusparseGetMatIndexBase(loTriFactor->descr);
fillMode = cusparseGetMatFillMode(loTriFactor->descr) == HIPSPARSE_FILL_MODE_UPPER ? HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(loTriFactor->descr);
/* Create the matrix description */
PetscCallCUSPARSE(hipsparseCreateMatDescr(&loTriFactorT->descr));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(loTriFactorT->descr, indexBase));
PetscCallCUSPARSE(hipsparseSetMatType(loTriFactorT->descr, matrixType));
PetscCallCUSPARSE(hipsparseSetMatFillMode(loTriFactorT->descr, fillMode));
PetscCallCUSPARSE(hipsparseSetMatDiagType(loTriFactorT->descr, diagType));
/* set the operation */
loTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
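/* the factor is stored explicitly transposed (as the CSC of the original factor), so the triangular solve itself uses the non-transpose operation */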
/* allocate GPU space for the CSC of the lower triangular factor*/
loTriFactorT->csrMat = new CsrMatrix;
loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols;
loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows;
loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries;
loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows + 1);
loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries);
loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries);
/* compute the transpose of the lower triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCallCUSPARSE(hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize));
PetscCallCUDA(hipMalloc(&loTriFactor->csr2cscBuffer, loTriFactor->csr2cscBufferSize));
#endif
PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
{
// there is no clean way to have PetscCallCUSPARSE wrapping this function...
auto stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer);
#else
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);
#endif
PetscCallCUSPARSE(stat);
}
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactorT->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, &loTriFactorT->solveBufferSize));
PetscCallCUDA(hipMalloc(&loTriFactorT->solveBuffer, loTriFactorT->solveBufferSize));
#endif
/* perform the solve analysis */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;
/*********************************************/
/* Now the Transpose of the Upper Tri Factor */
/*********************************************/
/* allocate space for the transpose of the upper triangular factor */
PetscCall(PetscNew(&upTriFactorT));
upTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the upper triangular factor */
matrixType = cusparseGetMatType(upTriFactor->descr);
indexBase = cusparseGetMatIndexBase(upTriFactor->descr);
fillMode = cusparseGetMatFillMode(upTriFactor->descr) == HIPSPARSE_FILL_MODE_UPPER ? HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(upTriFactor->descr);
/* Create the matrix description */
PetscCallCUSPARSE(hipsparseCreateMatDescr(&upTriFactorT->descr));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(upTriFactorT->descr, indexBase));
PetscCallCUSPARSE(hipsparseSetMatType(upTriFactorT->descr, matrixType));
PetscCallCUSPARSE(hipsparseSetMatFillMode(upTriFactorT->descr, fillMode));
PetscCallCUSPARSE(hipsparseSetMatDiagType(upTriFactorT->descr, diagType));
/* set the operation */
upTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the upper triangular factor*/
upTriFactorT->csrMat = new CsrMatrix;
upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols;
upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows;
upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries;
upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows + 1);
upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries);
upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries);
/* compute the transpose of the upper triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCallCUSPARSE(hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize));
PetscCallCUDA(hipMalloc(&upTriFactor->csr2cscBuffer, upTriFactor->csr2cscBufferSize));
#endif
PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
{
// there is no clean way to have PetscCallCUSPARSE wrapping this function...
auto stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer);
#else
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);
#endif
PetscCallCUSPARSE(stat);
}
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactorT->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, &upTriFactorT->solveBufferSize));
PetscCallCUDA(hipMalloc(&upTriFactorT->solveBuffer, upTriFactorT->solveBufferSize));
#endif
/* perform the solve analysis */
/* TODO: this setup of the transposed upper factor duplicates the lower-factor code above and should be refactored into a helper function */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, upTriFactorT->solvePolicy, upTriFactorT->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
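/* functor used by MatSeqAIJCUSPARSEFormExplicitTranspose() below to convert the permutation produced by csr2csc (computed in PetscScalar arithmetic) back into integer indices */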
struct PetscScalarToPetscInt {
__host__ __device__ PetscInt operator()(PetscScalar s) { return (PetscInt)PetscRealPart(s); }
};
static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTranspose(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
hipsparseStatus_t stat;
hipsparseIndexBase_t indexBase;
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat;
PetscCheck(matstruct, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing mat struct");
matstructT = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose;
PetscCheck(!A->transupdated || matstructT, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing matTranspose struct");
if (A->transupdated) PetscFunctionReturn(PETSC_SUCCESS);
PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
PetscCall(PetscLogGpuTimeBegin());
if (cusparsestruct->format != MAT_CUSPARSE_CSR) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE));
if (!cusparsestruct->matTranspose) { /* create cusparse matrix */
matstructT = new Mat_SeqAIJCUSPARSEMultStruct;
PetscCallCUSPARSE(hipsparseCreateMatDescr(&matstructT->descr));
indexBase = cusparseGetMatIndexBase(matstruct->descr);
PetscCallCUSPARSE(hipsparseSetMatIndexBase(matstructT->descr, indexBase));
PetscCallCUSPARSE(hipsparseSetMatType(matstructT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
/* set alpha and beta */
PetscCallCUDA(hipMalloc((void **)&(matstructT->alpha_one), sizeof(PetscScalar)));
PetscCallCUDA(hipMalloc((void **)&(matstructT->beta_zero), sizeof(PetscScalar)));
PetscCallCUDA(hipMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar)));
PetscCallCUDA(hipMemcpy(matstructT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(matstructT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *matrixT = new CsrMatrix;
matstructT->mat = matrixT;
matrixT->num_rows = A->cmap->n;
matrixT->num_cols = A->rmap->n;
matrixT->num_entries = a->nz;
matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows + 1);
matrixT->column_indices = new THRUSTINTARRAY32(a->nz);
matrixT->values = new THRUSTARRAY(a->nz);
if (!cusparsestruct->rowoffsets_gpu) cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1);
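/* the csr2csc call below needs the full (uncompressed) row offsets from a->i, since the CSR held in matstruct may use compressed rows */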
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
#if PETSC_PKG_CUDA_VERSION_GE(11, 2, 1)
stat = hipsparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */
indexBase, cusparse_scalartype);
PetscCallCUSPARSE(stat);
#else
/* cusparse-11.x returns errors with zero-sized matrices until 11.2.1,
see https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cusparse-11.2.1
I don't know what a proper value should be for matstructT->matDescr with empty matrices, so I just set
it to NULL to blow it up if one relies on it. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2,
when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I also set it accordingly.
*/
if (matrixT->num_entries) {
stat = hipsparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, indexBase, cusparse_scalartype);
PetscCallCUSPARSE(stat);
} else {
matstructT->matDescr = NULL;
matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase);
}
#endif
#endif
} else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *temp = new CsrMatrix;
CsrMatrix *tempT = new CsrMatrix;
/* First convert HYB to CSR */
temp->num_rows = A->rmap->n;
temp->num_cols = A->cmap->n;
temp->num_entries = a->nz;
temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
temp->column_indices = new THRUSTINTARRAY32(a->nz);
temp->values = new THRUSTARRAY(a->nz);
stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, (cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get());
PetscCallCUSPARSE(stat);
/* Next, convert CSR to CSC (i.e. the matrix transpose) */
tempT->num_rows = A->rmap->n;
tempT->num_cols = A->cmap->n;
tempT->num_entries = a->nz;
tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
tempT->column_indices = new THRUSTINTARRAY32(a->nz);
tempT->values = new THRUSTARRAY(a->nz);
stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(),
tempT->column_indices->data().get(), tempT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);
PetscCallCUSPARSE(stat);
/* Last, convert CSC to HYB */
cusparseHybMat_t hybMat;
PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat));
cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition);
PetscCallCUSPARSE(stat);
/* assign the pointer */
matstructT->mat = hybMat;
A->transupdated = PETSC_TRUE;
/* delete temporaries */
if (tempT) {
if (tempT->values) delete (THRUSTARRAY *)tempT->values;
if (tempT->column_indices) delete (THRUSTINTARRAY32 *)tempT->column_indices;
if (tempT->row_offsets) delete (THRUSTINTARRAY32 *)tempT->row_offsets;
delete (CsrMatrix *)tempT;
}
if (temp) {
if (temp->values) delete (THRUSTARRAY *)temp->values;
if (temp->column_indices) delete (THRUSTINTARRAY32 *)temp->column_indices;
if (temp->row_offsets) delete (THRUSTINTARRAY32 *)temp->row_offsets;
delete (CsrMatrix *)temp;
}
#endif
}
}
if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */
CsrMatrix *matrix = (CsrMatrix *)matstruct->mat;
CsrMatrix *matrixT = (CsrMatrix *)matstructT->mat;
PetscCheck(matrix, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix");
PetscCheck(matrix->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix rows");
PetscCheck(matrix->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix cols");
PetscCheck(matrix->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix values");
PetscCheck(matrixT, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT");
PetscCheck(matrixT->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT rows");
PetscCheck(matrixT->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT cols");
PetscCheck(matrixT->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT values");
if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */
cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1);
PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt)));
}
if (!cusparsestruct->csr2csc_i) {
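/* Build csr2csc_i once: push the sequence 0,1,...,nnz-1 through csr2csc so that the transposed values end up holding, for each CSC position, the index of the corresponding CSR entry; subsequent transpose updates then reduce to a simple gather */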
THRUSTARRAY csr2csc_a(matrix->num_entries);
PetscCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0));
indexBase = cusparseGetMatIndexBase(matstruct->descr);
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
void *csr2cscBuffer;
size_t csr2cscBufferSize;
stat = hipsparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, matrix->values->data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(),
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, &csr2cscBufferSize);
PetscCallCUSPARSE(stat);
PetscCallCUDA(hipMalloc(&csr2cscBuffer, csr2cscBufferSize));
#endif
if (matrix->num_entries) {
/* When there are no nonzeros, this routine mistakenly returns HIPSPARSE_STATUS_INVALID_VALUE in
mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. But CUDA-10 is OK.
I checked every parameter and they were all fine. I have no clue why cusparse complains.
Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[]
should be filled with indexBase. So I just take a shortcut here.
*/
stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, csr2csc_a.data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, csr2cscBuffer);
PetscCallCUSPARSE(stat);
#else
matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);
PetscCallCUSPARSE(stat);
#endif
} else {
matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase);
}
cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries);
PetscCallThrust(thrust::transform(thrust::device, matrixT->values->begin(), matrixT->values->end(), cusparsestruct->csr2csc_i->begin(), PetscScalarToPetscInt()));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCallCUDA(hipFree(csr2cscBuffer));
#endif
}
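/* values-only update: gather the current CSR values into CSC order through the precomputed csr2csc_i permutation */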
PetscCallThrust(
thrust::copy(thrust::device, thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()), thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()), matrixT->values->begin()));
}
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
/* the compressed row indices are not used for matTranspose */
matstructT->cprowIndices = NULL;
/* assign the pointer */
((Mat_SeqAIJCUSPARSE *)A->spptr)->matTranspose = matstructT;
A->transupdated = PETSC_TRUE;
PetscFunctionReturn(PETSC_SUCCESS);
}
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_LU(Mat A, Vec b, Vec x)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
const Mat_SeqAIJ *aij = static_cast<Mat_SeqAIJ *>(A->data);
const hipsparseOperation_t op = HIPSPARSE_OPERATION_NON_TRANSPOSE;
const cusparseSpSVAlg_t alg = CUSPARSE_SPSV_ALG_DEFAULT;
PetscInt m = A->rmap->n;
PetscFunctionBegin;
PetscCall(PetscLogGpuTimeBegin());
PetscCall(VecCUDAGetArrayWrite(x, &xarray));
PetscCall(VecCUDAGetArrayRead(b, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
// Reorder b with the row permutation if needed, and wrap the result in fs->X
if (fs->rpermIndices) {
PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X)));
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
}
// Solve L Y = X
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
// Note that cusparseSpSV_solve() secretly uses the external buffer used in cusparseSpSV_analysis()!
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, op, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_L));
// Solve U X = Y
if (fs->cpermIndices) {
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, xarray));
}
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, op, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_U));
// Reorder X with the column permutation if needed, and put the result back to x
if (fs->cpermIndices) {
PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()),
thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU));
}
PetscCall(VecCUDARestoreArrayRead(b, &barray));
PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * aij->nz - m));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_LU(Mat A, Vec b, Vec x)
{
Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
Mat_SeqAIJ *aij = static_cast<Mat_SeqAIJ *>(A->data);
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
const hipsparseOperation_t opA = HIPSPARSE_OPERATION_TRANSPOSE;
const cusparseSpSVAlg_t alg = CUSPARSE_SPSV_ALG_DEFAULT;
PetscInt m = A->rmap->n;
PetscFunctionBegin;
PetscCall(PetscLogGpuTimeBegin());
if (!fs->createdTransposeSpSVDescr) { // Call MatSolveTranspose() for the first time
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* The matrix is still L. We only do transpose solve with it */
fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Ut));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut, &fs->spsvBufferSize_Ut));
PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt));
PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_Ut, fs->spsvBufferSize_Ut));
fs->createdTransposeSpSVDescr = PETSC_TRUE;
}
if (!fs->updatedTransposeSpSVAnalysis) {
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Lt, fs->spsvBuffer_Lt));
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut, fs->spsvBuffer_Ut));
fs->updatedTransposeSpSVAnalysis = PETSC_TRUE;
}
PetscCall(VecCUDAGetArrayWrite(x, &xarray));
PetscCall(VecCUDAGetArrayRead(b, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
// Reorder b with the row permutation if needed, and wrap the result in fs->X
if (fs->rpermIndices) {
PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X)));
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
}
// Solve Ut Y = X
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut));
// Solve Lt X = Y
if (fs->cpermIndices) { // if need to permute, we need to use the intermediate buffer X
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, xarray));
}
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_Lt));
// Reorder X with the column permutation if needed, and put the result back to x
if (fs->cpermIndices) {
PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()),
thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU));
}
PetscCall(VecCUDARestoreArrayRead(b, &barray));
PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * aij->nz - A->rmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
#else
/* Why do we need to analyze the transposed matrix again? Can't we just use op(A) = HIPSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx)
{
PetscInt n = xx->map->n;
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A));
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
PetscCall(VecCUDAGetArrayRead(bb, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
PetscCall(PetscLogGpuTimeBegin());
/* First, reorder with the row permutation */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU + n, cusparseTriFactors->rpermIndices->end()), xGPU);
/* Next, solve U */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer));
/* Then, solve L */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer));
/* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU + n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin());
/* Copy the temporary to the full solution. */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), tempGPU->begin(), tempGPU->end(), xGPU);
/* restore */
PetscCall(VecCUDARestoreArrayRead(bb, &barray));
PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A));
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
PetscCall(VecCUDAGetArrayRead(bb, &barray));
PetscCall(PetscLogGpuTimeBegin());
/* First, solve U */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, barray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer));
/* Then, solve L */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer));
/* restore */
PetscCall(VecCUDARestoreArrayRead(bb, &barray));
PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector;
PetscFunctionBegin;
/* Get the GPU pointers */
PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
PetscCall(VecCUDAGetArrayRead(bb, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
PetscCall(PetscLogGpuTimeBegin());
/* First, reorder with the row permutation */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin());
/* Next, solve L */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, tempGPU->data().get(), xarray, loTriFactor->solvePolicy, loTriFactor->solveBuffer));
/* Then, solve U */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, xarray, tempGPU->data().get(), upTriFactor->solvePolicy, upTriFactor->solveBuffer));
/* Last, reorder with the column permutation */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU);
PetscCall(VecCUDARestoreArrayRead(bb, &barray));
PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector;
PetscFunctionBegin;
/* Get the GPU pointers */
PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
PetscCall(VecCUDAGetArrayRead(bb, &barray));
PetscCall(PetscLogGpuTimeBegin());
/* First, solve L */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, barray, tempGPU->data().get(), loTriFactor->solvePolicy, loTriFactor->solveBuffer));
/* Next, solve U */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), xarray, upTriFactor->solvePolicy, upTriFactor->solveBuffer));
PetscCall(VecCUDARestoreArrayRead(bb, &barray));
PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
static PetscErrorCode MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, const MatFactorInfo *)
{
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data;
Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *Acsr;
PetscInt m, nz;
PetscBool flg;
PetscFunctionBegin;
if (PetscDefined(USE_DEBUG)) {
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name);
}
/* Copy A's value to fact */
m = fact->rmap->n;
nz = aij->nz;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
Acsr = (CsrMatrix *)Acusp->mat->mat;
PetscCallCUDA(hipMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream));
/* Factorize fact inplace */
if (m)
PetscCallCUSPARSE(cusparseXcsrilu02(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */
fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M));
if (PetscDefined(USE_DEBUG)) {
int numerical_zero;
hipsparseStatus_t status;
status = hipsparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &numerical_zero);
PetscAssert(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csrilu02: A(%d,%d) is zero", numerical_zero, numerical_zero);
}
/* cusparseSpSV_analysis() is numeric, i.e., it requires valid matrix values, therefore, we do it after cusparseXcsrilu02()
See discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/78
*/
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L));
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U));
/* L, U values have changed, reset the flag to indicate we need to redo cusparseSpSV_analysis() for transpose solve */
fs->updatedTransposeSpSVAnalysis = PETSC_FALSE;
fact->offloadmask = PETSC_OFFLOAD_GPU;
fact->ops->solve = MatSolve_SeqAIJCUSPARSE_LU; // spMatDescr_L/U uses 32-bit indices, but cusparseSpSV_solve() supports both 32 and 64. The info is encoded in hipsparseSpMatDescr_t.
fact->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_LU;
fact->ops->matsolve = NULL;
fact->ops->matsolvetranspose = NULL;
PetscCall(PetscLogGpuFlops(fs->numericFactFlops));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, IS, IS, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data;
PetscInt m, nz;
PetscFunctionBegin;
if (PetscDefined(USE_DEBUG)) {
PetscInt i;
PetscBool flg, missing;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name);
PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n);
PetscCall(MatMissingDiagonal(A, &missing, &i));
PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, i);
}
/* Free the old stale stuff */
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs));
/* Copy over A's metadata to fact. Note that fact's i,j,a are also allocated on the host,
but they will not be used; they are allocated only to ease debugging.
*/
PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/));
fact->offloadmask = PETSC_OFFLOAD_BOTH;
fact->factortype = MAT_FACTOR_ILU;
fact->info.factor_mallocs = 0;
fact->info.fill_ratio_given = info->fill;
fact->info.fill_ratio_needed = 1.0;
aij->row = NULL;
aij->col = NULL;
/* ====================================================================== */
/* Copy A's i, j to fact and also allocate the value array of fact. */
/* We'll do in-place factorization on fact */
/* ====================================================================== */
const int *Ai, *Aj;
m = fact->rmap->n;
nz = aij->nz;
PetscCallCUDA(hipMalloc((void **)&fs->csrRowPtr32, sizeof(*(fs->csrRowPtr32)) * (m + 1)));
PetscCallCUDA(hipMalloc((void **)&fs->csrColIdx32, sizeof(*(fs->csrColIdx32)) * nz));
PetscCallCUDA(hipMalloc((void **)&fs->csrVal, sizeof(*(fs->csrVal)) * nz));
PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai. The returned Ai, Aj are 32-bit */
PetscCallCUDA(hipMemcpyAsync(fs->csrRowPtr32, Ai, sizeof(*Ai) * (m + 1), hipMemcpyDeviceToDevice, PetscDefaultCudaStream));
PetscCallCUDA(hipMemcpyAsync(fs->csrColIdx32, Aj, sizeof(*Aj) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream));
/* ====================================================================== */
/* Create descriptors for M, L, U */
/* ====================================================================== */
hipsparseFillMode_t fillMode;
hipsparseDiagType_t diagType;
PetscCallCUSPARSE(hipsparseCreateMatDescr(&fs->matDescr_M));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(fs->matDescr_M, HIPSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(hipsparseSetMatType(fs->matDescr_M, HIPSPARSE_MATRIX_TYPE_GENERAL));
/* https://docs.nvidia.com/cuda/cusparse/index.html#hipsparseDiagType_t
hipsparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
assumed to be present, but if HIPSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
*/
fillMode = HIPSPARSE_FILL_MODE_LOWER;
diagType = HIPSPARSE_DIAG_TYPE_UNIT;
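/* the ILU0 factor L is unit lower triangular: its diagonal is implicit, and the diagonal entries stored in the CSR belong to U */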
PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
fillMode = HIPSPARSE_FILL_MODE_UPPER;
diagType = HIPSPARSE_DIAG_TYPE_NON_UNIT;
PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_U, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
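/* spMatDescr_L and spMatDescr_U are two triangular views of the same in-place factored CSR arrays; only the fill-mode and diag-type attributes differ */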
/* ========================================================================= */
/* Query buffer sizes for csrilu0, SpSV and allocate buffers */
/* ========================================================================= */
PetscCallCUSPARSE(hipsparseCreateCsrilu02Info(&fs->ilu0Info_M));
if (m)
PetscCallCUSPARSE(cusparseXcsrilu02_bufferSize(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */
fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, &fs->factBufferSize_M));
PetscCallCUDA(hipMalloc((void **)&fs->X, sizeof(PetscScalar) * m));
PetscCallCUDA(hipMalloc((void **)&fs->Y, sizeof(PetscScalar) * m));
PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));
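/* X and Y are device work arrays: X receives the (possibly permuted) right-hand side and Y the intermediate result of the first triangular solve; the dense-vector descriptors are re-pointed at user arrays when no permutation is needed */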
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U));
/* From my experiment with the example at https://github.com/NVIDIA/CUDALibrarySamples/tree/master/cuSPARSE/bicgstab,
and discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/77,
spsvBuffer_L/U cannot be shared (i.e., be the same buffer) in our case, but factBuffer_M can be shared with either of spsvBuffer_L/U.
To save memory, we let factBuffer_M share storage with the bigger of spsvBuffer_L/U.
*/
if (fs->spsvBufferSize_L > fs->spsvBufferSize_U) {
PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M)));
fs->spsvBuffer_L = fs->factBuffer_M;
PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U));
} else {
PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_U, (size_t)fs->factBufferSize_M)));
fs->spsvBuffer_U = fs->factBuffer_M;
PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L));
}
/* ========================================================================== */
/* Perform analysis of ilu0 on M, SpSv on L and U */
/* The lower(upper) triangular part of M has the same sparsity pattern as L(U)*/
/* ========================================================================== */
int structural_zero;
hipsparseStatus_t status;
fs->policy_M = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
if (m)
PetscCallCUSPARSE(cusparseXcsrilu02_analysis(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */
fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M));
if (PetscDefined(USE_DEBUG)) {
/* Function hipsparseXcsrilu02_zeroPivot() is a blocking call. It calls hipDeviceSynchronize() to make sure all previous kernels are done. */
status = hipsparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &structural_zero);
PetscCheck(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csrilu02: A(%d,%d) is missing", structural_zero, structural_zero);
}
/* Estimate FLOPs of the numeric factorization */
{
Mat_SeqAIJ *Aseq = (Mat_SeqAIJ *)A->data;
PetscInt *Ai, *Adiag, nzRow, nzLeft;
PetscLogDouble flops = 0.0;
PetscCall(MatMarkDiagonal_SeqAIJ(A));
Ai = Aseq->i;
Adiag = Aseq->diag;
for (PetscInt i = 0; i < m; i++) {
if (Ai[i] < Adiag[i] && Adiag[i] < Ai[i + 1]) { /* there are nonzeros to the left of the diagonal in row i */
nzRow = Ai[i + 1] - Ai[i];
nzLeft = Adiag[i] - Ai[i];
/* Eliminate the nonzeros to the left of the diagonal one by one. Assume each elimination updates the
nonzeros to the right of, and including, the eliminated entry, costing one multiplication and one addition per update.
*/
nzLeft = (nzRow - 1) / 2;
flops += nzLeft * (2.0 * nzRow - nzLeft + 1);
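/* e.g., a row with nzRow = 5 gives nzLeft = 2 and adds 2 * (2 * 5 - 2 + 1) = 18 flops */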
}
}
fs->numericFactFlops = flops;
}
fact->ops->lufactornumeric = MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_ICC0(Mat fact, Vec b, Vec x)
{
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data;
const PetscScalar *barray;
PetscScalar *xarray;
PetscFunctionBegin;
PetscCall(VecCUDAGetArrayWrite(x, &xarray));
PetscCall(VecCUDAGetArrayRead(b, &barray));
PetscCall(PetscLogGpuTimeBegin());
/* Solve L*y = b */
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* L Y = X */
fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L));
/* Solve Lt*x = y */
PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, xarray));
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* Lt X = Y */
fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt));
PetscCall(VecCUDARestoreArrayRead(b, &barray));
PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * aij->nz - fact->rmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, const MatFactorInfo *)
{
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data;
Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *Acsr;
PetscInt m, nz;
PetscBool flg;
PetscFunctionBegin;
if (PetscDefined(USE_DEBUG)) {
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name);
}
/* Copy A's value to fact */
m = fact->rmap->n;
nz = aij->nz;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
Acsr = (CsrMatrix *)Acusp->mat->mat;
PetscCallCUDA(hipMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream));
/* Factorize fact inplace */
/* https://docs.nvidia.com/cuda/cusparse/index.html#csric02_solve
Function csric02() only takes the lower triangular part of matrix A to perform factorization.
The matrix type must be HIPSPARSE_MATRIX_TYPE_GENERAL, the fill mode and diagonal type are ignored,
and the strictly upper triangular part is ignored and never touched. It does not matter if A is Hermitian or not.
In other words, from the point of view of csric02() A is Hermitian and only the lower triangular part is provided.
*/
if (m) PetscCallCUSPARSE(cusparseXcsric02(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M));
if (PetscDefined(USE_DEBUG)) {
int numerical_zero;
hipsparseStatus_t status;
status = hipsparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &numerical_zero);
PetscAssert(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csric02: A(%d,%d) is zero", numerical_zero, numerical_zero);
}
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L));
/* Note that cusparse reports this error if we use double and HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE
** On entry to cusparseSpSV_analysis(): conjugate transpose (opA) is not supported for matA data type, current -> HIP_R_64F
*/
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, fs->spsvBuffer_Lt));
fact->offloadmask = PETSC_OFFLOAD_GPU;
fact->ops->solve = MatSolve_SeqAIJCUSPARSE_ICC0;
fact->ops->solvetranspose = MatSolve_SeqAIJCUSPARSE_ICC0;
fact->ops->matsolve = NULL;
fact->ops->matsolvetranspose = NULL;
PetscCall(PetscLogGpuFlops(fs->numericFactFlops));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, IS, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data;
PetscInt m, nz;
PetscFunctionBegin;
if (PetscDefined(USE_DEBUG)) {
PetscInt i;
PetscBool flg, missing;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name);
PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n);
PetscCall(MatMissingDiagonal(A, &missing, &i));
PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, i);
}
/* Free the old stale stuff */
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs));
/* Copy over A's metadata to fact. Note that fact's i,j,a are also allocated on the host,
but they will not be used; they are allocated only to ease debugging.
*/
PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/));
fact->offloadmask = PETSC_OFFLOAD_BOTH;
fact->factortype = MAT_FACTOR_ICC;
fact->info.factor_mallocs = 0;
fact->info.fill_ratio_given = info->fill;
fact->info.fill_ratio_needed = 1.0;
aij->row = NULL;
aij->col = NULL;
/* ====================================================================== */
/* Copy A's i, j to fact and also allocate the value array of fact. */
/* We'll do in-place factorization on fact */
/* ====================================================================== */
const int *Ai, *Aj;
m = fact->rmap->n;
nz = aij->nz;
PetscCallCUDA(hipMalloc((void **)&fs->csrRowPtr32, sizeof(*(fs->csrRowPtr32)) * (m + 1)));
PetscCallCUDA(hipMalloc((void **)&fs->csrColIdx32, sizeof(*(fs->csrColIdx32)) * nz));
PetscCallCUDA(hipMalloc((void **)&fs->csrVal, sizeof(PetscScalar) * nz));
PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai */
PetscCallCUDA(hipMemcpyAsync(fs->csrRowPtr32, Ai, sizeof(*Ai) * (m + 1), hipMemcpyDeviceToDevice, PetscDefaultCudaStream));
PetscCallCUDA(hipMemcpyAsync(fs->csrColIdx32, Aj, sizeof(*Aj) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream));
/* ====================================================================== */
/* Create mat descriptors for M, L */
/* ====================================================================== */
hipsparseFillMode_t fillMode;
hipsparseDiagType_t diagType;
PetscCallCUSPARSE(hipsparseCreateMatDescr(&fs->matDescr_M));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(fs->matDescr_M, HIPSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(hipsparseSetMatType(fs->matDescr_M, HIPSPARSE_MATRIX_TYPE_GENERAL));
/* https://docs.nvidia.com/cuda/cusparse/index.html#hipsparseDiagType_t
hipsparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
assumed to be present, but if HIPSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
*/
fillMode = HIPSPARSE_FILL_MODE_LOWER;
diagType = HIPSPARSE_DIAG_TYPE_NON_UNIT;
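/* unlike the ILU0 case, the IC(0) factor L carries an explicit (non-unit) diagonal */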
PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
/* ========================================================================= */
/* Query buffer sizes for csric0, SpSV of L and Lt, and allocate buffers */
/* ========================================================================= */
PetscCallCUSPARSE(hipsparseCreateCsric02Info(&fs->ic0Info_M));
if (m) PetscCallCUSPARSE(cusparseXcsric02_bufferSize(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, &fs->factBufferSize_M));
PetscCallCUDA(hipMalloc((void **)&fs->X, sizeof(PetscScalar) * m));
PetscCallCUDA(hipMalloc((void **)&fs->Y, sizeof(PetscScalar) * m));
PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt));
  /* To save device memory, we share the factorization buffer with the larger of the two triangular-solve buffers.
     See also comments in MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0().
   */
if (fs->spsvBufferSize_L > fs->spsvBufferSize_Lt) {
PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M)));
fs->spsvBuffer_L = fs->factBuffer_M;
PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt));
} else {
PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_Lt, (size_t)fs->factBufferSize_M)));
fs->spsvBuffer_Lt = fs->factBuffer_M;
PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L));
}
/* ========================================================================== */
/* Perform analysis of ic0 on M */
/* The lower triangular part of M has the same sparsity pattern as L */
/* ========================================================================== */
int structural_zero;
hipsparseStatus_t status;
fs->policy_M = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
if (m) PetscCallCUSPARSE(cusparseXcsric02_analysis(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M));
if (PetscDefined(USE_DEBUG)) {
/* Function hipsparseXcsric02_zeroPivot() is a blocking call. It calls hipDeviceSynchronize() to make sure all previous kernels are done. */
status = hipsparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &structural_zero);
PetscCheck(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csric02: A(%d,%d) is missing", structural_zero, structural_zero);
}
/* Estimate FLOPs of the numeric factorization */
{
Mat_SeqAIJ *Aseq = (Mat_SeqAIJ *)A->data;
PetscInt *Ai, nzRow, nzLeft;
PetscLogDouble flops = 0.0;
Ai = Aseq->i;
for (PetscInt i = 0; i < m; i++) {
nzRow = Ai[i + 1] - Ai[i];
if (nzRow > 1) {
        /* We want to eliminate the nonzeros to the left of the diagonal one by one. Assume that each time, the nonzeros
           to the right of (and including) the eliminated one are updated, and each update incurs a multiplication and an addition.
        */
nzLeft = (nzRow - 1) / 2;
flops += nzLeft * (2.0 * nzRow - nzLeft + 1);
}
}
fs->numericFactFlops = flops;
}
fact->ops->choleskyfactornumeric = MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0;
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
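/* LU numeric factorization: the factorization itself is computed on the host by MatLUFactorNumeric_SeqAIJ();
   the resulting factors are then used for cuSPARSE triangular solves unless use_cpu_solve is set */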
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info)
{
// use_cpu_solve is a field in Mat_SeqAIJCUSPARSE. B, a factored matrix, uses Mat_SeqAIJCUSPARSETriFactors.
Mat_SeqAIJCUSPARSE *cusparsestruct = static_cast<Mat_SeqAIJCUSPARSE *>(A->spptr);
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
PetscCall(MatLUFactorNumeric_SeqAIJ(B, A, info));
B->offloadmask = PETSC_OFFLOAD_CPU;
if (!cusparsestruct->use_cpu_solve) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
B->ops->solve = MatSolve_SeqAIJCUSPARSE_LU;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_LU;
#else
/* determine which version of MatSolve needs to be used. */
Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
IS isrow = b->row, iscol = b->col;
PetscBool row_identity, col_identity;
PetscCall(ISIdentity(isrow, &row_identity));
PetscCall(ISIdentity(iscol, &col_identity));
if (row_identity && col_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
}
#endif
}
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
/* get the triangular factors */
if (!cusparsestruct->use_cpu_solve) PetscCall(MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(B->spptr);
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
PetscCall(MatLUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info));
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
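/* ILU symbolic factorization: with CUDA >= 11.4, zero fill levels and natural ordering, use the cuSPARSE ILU(0) path;
   otherwise fall back to the host symbolic factorization */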
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;
PetscFunctionBegin;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscBool row_identity = PETSC_FALSE, col_identity = PETSC_FALSE;
if (cusparseTriFactors->factorizeOnDevice) {
PetscCall(ISIdentity(isrow, &row_identity));
PetscCall(ISIdentity(iscol, &col_identity));
}
if (!info->levels && row_identity && col_identity) {
PetscCall(MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(B, A, isrow, iscol, info));
} else
#endif
{
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
PetscCall(MatILUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info));
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
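/* ICC symbolic factorization: with CUDA >= 11.4, zero fill levels and an identity permutation, use the cuSPARSE ICC(0) path;
   otherwise fall back to the host symbolic factorization */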
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;
PetscFunctionBegin;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscBool perm_identity = PETSC_FALSE;
if (cusparseTriFactors->factorizeOnDevice) PetscCall(ISIdentity(perm, &perm_identity));
if (!info->levels && perm_identity) {
PetscCall(MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(B, A, perm, info));
} else
#endif
{
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
PetscCall(MatICCFactorSymbolic_SeqAIJ(B, A, perm, info));
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
PetscCall(MatCholeskyFactorSymbolic_SeqAIJ(B, A, perm, info));
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat, MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
/*MC
  MATSOLVERCUSPARSE = "cusparse" - A matrix solver type providing triangular solvers for sequential matrices
  of type `MATSEQAIJCUSPARSE` on a single GPU. Currently supported
  algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) result in poorer
  performance in the triangular solves. Full LU and Cholesky decompositions can be solved through the
  cuSPARSE triangular solve algorithm. However, the performance can be quite poor, so these
  algorithms are not recommended. This class does NOT support direct solver operations.
Level: beginner
.seealso: [](chapter_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `PCFactorSetMatSolverType()`, `MatSolverType`, `MatCreateSeqAIJCUSPARSE()`,
`MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation`
M*/
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A, MatFactorType ftype, Mat *B)
{
PetscInt n = A->rmap->n;
PetscBool factOnDevice, factOnHost;
char *prefix;
char factPlace[32] = "device"; /* the default */
PetscFunctionBegin;
PetscCall(MatCreate(PetscObjectComm((PetscObject)A), B));
PetscCall(MatSetSizes(*B, n, n, n, n));
(*B)->factortype = ftype; // factortype makes MatSetType() allocate spptr of type Mat_SeqAIJCUSPARSETriFactors
PetscCall(MatSetType(*B, MATSEQAIJCUSPARSE));
prefix = (*B)->factorprefix ? (*B)->factorprefix : ((PetscObject)A)->prefix;
PetscOptionsBegin(PetscObjectComm((PetscObject)(*B)), prefix, "MatGetFactor", "Mat");
PetscCall(PetscOptionsString("-mat_factor_bind_factorization", "Do matrix factorization on host or device when possible", "MatGetFactor", NULL, factPlace, sizeof(factPlace), NULL));
PetscOptionsEnd();
PetscCall(PetscStrcasecmp("device", factPlace, &factOnDevice));
PetscCall(PetscStrcasecmp("host", factPlace, &factOnHost));
PetscCheck(factOnDevice || factOnHost, PetscObjectComm((PetscObject)(*B)), PETSC_ERR_ARG_OUTOFRANGE, "Wrong option %s to -mat_factor_bind_factorization <string>. Only host and device are allowed", factPlace);
((Mat_SeqAIJCUSPARSETriFactors *)(*B)->spptr)->factorizeOnDevice = factOnDevice;
if (A->boundtocpu && A->bindingpropagates) PetscCall(MatBindToCPU(*B, PETSC_TRUE));
if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
PetscCall(MatSetBlockSizesFromMats(*B, A, A));
if (!A->boundtocpu) {
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE;
} else {
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJ;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJ;
}
PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_LU]));
PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILU]));
PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILUDT]));
} else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
if (!A->boundtocpu) {
(*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
} else {
(*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJ;
(*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJ;
}
PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY]));
PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ICC]));
} else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Factor type not supported for CUSPARSE Matrix Types");
PetscCall(MatSeqAIJSetPreallocation(*B, MAT_SKIP_ALLOCATION, NULL));
(*B)->canuseordering = PETSC_TRUE;
PetscCall(PetscObjectComposeFunction((PetscObject)(*B), "MatFactorGetSolverType_C", MatFactorGetSolverType_seqaij_cusparse));
PetscFunctionReturn(PETSC_SUCCESS);
}
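/* Copy the matrix values from the GPU to the host when the device copy is ahead (PETSC_OFFLOAD_GPU).
   Handles both unfactored matrices and device-factored matrices that keep their values in fs->csrVal */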
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
#endif
PetscFunctionBegin;
if (A->offloadmask == PETSC_OFFLOAD_GPU) {
PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0));
if (A->factortype == MAT_FACTOR_NONE) {
CsrMatrix *matrix = (CsrMatrix *)cusp->mat->mat;
PetscCallCUDA(hipMemcpy(a->a, matrix->values->data().get(), a->nz * sizeof(PetscScalar), hipMemcpyDeviceToHost));
}
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
else if (fs->csrVal) {
/* We have a factorized matrix on device and are able to copy it to host */
PetscCallCUDA(hipMemcpy(a->a, fs->csrVal, a->nz * sizeof(PetscScalar), hipMemcpyDeviceToHost));
}
#endif
else
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "No support for copying this type of factorized matrix from device to host");
PetscCall(PetscLogGpuToCpu(a->nz * sizeof(PetscScalar)));
PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0));
A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
*array = ((Mat_SeqAIJ *)A->data)->a;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJRestoreArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
PetscFunctionBegin;
A->offloadmask = PETSC_OFFLOAD_CPU;
*array = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJGetArrayRead_SeqAIJCUSPARSE(Mat A, const PetscScalar *array[])
{
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
*array = ((Mat_SeqAIJ *)A->data)->a;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE(Mat, const PetscScalar *array[])
{
PetscFunctionBegin;
*array = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
PetscFunctionBegin;
*array = ((Mat_SeqAIJ *)A->data)->a;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
PetscFunctionBegin;
A->offloadmask = PETSC_OFFLOAD_CPU;
*array = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
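/* Expose the device CSR arrays (row offsets, column indices, values) of an unfactored matrix, together with their memory type */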
static PetscErrorCode MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE(Mat A, const PetscInt **i, const PetscInt **j, PetscScalar **a, PetscMemType *mtype)
{
Mat_SeqAIJCUSPARSE *cusp;
CsrMatrix *matrix;
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCheck(A->factortype == MAT_FACTOR_NONE, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "Not for factored matrix");
cusp = static_cast<Mat_SeqAIJCUSPARSE *>(A->spptr);
PetscCheck(cusp != NULL, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "cusp is NULL");
matrix = (CsrMatrix *)cusp->mat->mat;
if (i) {
#if !defined(PETSC_USE_64BIT_INDICES)
*i = matrix->row_offsets->data().get();
#else
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSparse does not supported 64-bit indices");
#endif
}
if (j) {
#if !defined(PETSC_USE_64BIT_INDICES)
*j = matrix->column_indices->data().get();
#else
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSparse does not supported 64-bit indices");
#endif
}
if (a) *a = matrix->values->data().get();
if (mtype) *mtype = PETSC_MEMTYPE_CUDA;
PetscFunctionReturn(PETSC_SUCCESS);
}
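/* Copy (or update) the matrix from the host to the GPU. If the nonzero pattern is unchanged and the format is CSR,
   only the values are copied; otherwise the cuSPARSE structures are rebuilt from the host CSR (or compressed-row) data */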
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscInt m = A->rmap->n, *ii, *ridx, tmp;
hipsparseStatus_t stat;
PetscBool both = PETSC_TRUE;
PetscFunctionBegin;
PetscCheck(!A->boundtocpu, PETSC_COMM_SELF, PETSC_ERR_GPU, "Cannot copy to GPU");
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { /* Copy values only */
CsrMatrix *matrix;
matrix = (CsrMatrix *)cusparsestruct->mat->mat;
PetscCheck(!a->nz || a->a, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR values");
PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0));
matrix->values->assign(a->a, a->a + a->nz);
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogCpuToGpu((a->nz) * sizeof(PetscScalar)));
PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0));
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE));
} else {
PetscInt nnz;
PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0));
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat, cusparsestruct->format));
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE));
delete cusparsestruct->workVector;
delete cusparsestruct->rowoffsets_gpu;
cusparsestruct->workVector = NULL;
cusparsestruct->rowoffsets_gpu = NULL;
try {
if (a->compressedrow.use) {
m = a->compressedrow.nrows;
ii = a->compressedrow.i;
ridx = a->compressedrow.rindex;
} else {
m = A->rmap->n;
ii = a->i;
ridx = NULL;
}
PetscCheck(ii, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR row data");
if (!a->a) {
nnz = ii[m];
both = PETSC_FALSE;
} else nnz = a->nz;
PetscCheck(!nnz || a->j, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR column data");
/* create cusparse matrix */
cusparsestruct->nrows = m;
matstruct = new Mat_SeqAIJCUSPARSEMultStruct;
PetscCallCUSPARSE(hipsparseCreateMatDescr(&matstruct->descr));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(matstruct->descr, HIPSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(hipsparseSetMatType(matstruct->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
PetscCallCUDA(hipMalloc((void **)&(matstruct->alpha_one), sizeof(PetscScalar)));
PetscCallCUDA(hipMalloc((void **)&(matstruct->beta_zero), sizeof(PetscScalar)));
PetscCallCUDA(hipMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar)));
PetscCallCUDA(hipMemcpy(matstruct->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(matstruct->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCallCUSPARSE(hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE));
/* Build a hybrid/ellpack matrix if this option is chosen for the storage */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
/* set the matrix */
CsrMatrix *mat = new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m + 1);
mat->row_offsets->assign(ii, ii + m + 1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j + nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a + nnz);
/* assign the pointer */
matstruct->mat = mat;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
if (mat->num_rows) { /* cusparse errors on empty matrices! */
stat = hipsparseCreateCsr(&matstruct->matDescr, mat->num_rows, mat->num_cols, mat->num_entries, mat->row_offsets->data().get(), mat->column_indices->data().get(), mat->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
}
#endif
} else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *mat = new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m + 1);
mat->row_offsets->assign(ii, ii + m + 1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j + nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a + nnz);
cusparseHybMat_t hybMat;
PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat));
cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), hybMat, 0, partition);
PetscCallCUSPARSE(stat);
/* assign the pointer */
matstruct->mat = hybMat;
if (mat) {
if (mat->values) delete (THRUSTARRAY *)mat->values;
if (mat->column_indices) delete (THRUSTINTARRAY32 *)mat->column_indices;
if (mat->row_offsets) delete (THRUSTINTARRAY32 *)mat->row_offsets;
delete (CsrMatrix *)mat;
}
#endif
}
/* assign the compressed row indices */
if (a->compressedrow.use) {
cusparsestruct->workVector = new THRUSTARRAY(m);
matstruct->cprowIndices = new THRUSTINTARRAY(m);
matstruct->cprowIndices->assign(ridx, ridx + m);
tmp = m;
} else {
cusparsestruct->workVector = NULL;
matstruct->cprowIndices = NULL;
tmp = 0;
}
PetscCall(PetscLogCpuToGpu(((m + 1) + (a->nz)) * sizeof(int) + tmp * sizeof(PetscInt) + (3 + (a->nz)) * sizeof(PetscScalar)));
/* assign the pointer */
cusparsestruct->mat = matstruct;
} catch (char *ex) {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
}
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0));
cusparsestruct->nonzerostate = A->nonzerostate;
}
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
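/* Thrust functors applied to zipped iterators: y += x, y = x, and the reverse assignment x = y */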
struct VecCUDAPlusEquals {
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t);
}
};
struct VecCUDAEquals {
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<0>(t);
}
};
struct VecCUDAEqualsReverse {
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
thrust::get<0>(t) = thrust::get<1>(t);
}
};
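/* Data attached to a MatProduct with a CUSPARSE sparse A: buffers, descriptors and intermediate matrices
   that are created in the symbolic phase and reused in the numeric phase */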
struct MatMatCusparse {
PetscBool cisdense;
PetscScalar *Bt;
Mat X;
  PetscBool reusesym; /* cuSPARSE does not have split symbolic and numeric phases for sparse matmat operations */
PetscLogDouble flops;
CsrMatrix *Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
hipsparseSpMatDescr_t matSpBDescr;
PetscBool initialized; /* C = alpha op(A) op(B) + beta C */
hipsparseDnMatDescr_t matBDescr;
hipsparseDnMatDescr_t matCDescr;
  PetscInt Blda, Clda; /* Record leading dimensions of B and C here to detect changes */
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
void *dBuffer4;
void *dBuffer5;
#endif
size_t mmBufferSize;
void *mmBuffer;
void *mmBuffer2; /* SpGEMM WorkEstimation buffer */
hipsparseSpGEMMDescr_t spgemmDesc;
#endif
};
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
MatMatCusparse *mmdata = (MatMatCusparse *)data;
PetscFunctionBegin;
PetscCallCUDA(hipFree(mmdata->Bt));
delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
if (mmdata->matSpBDescr) PetscCallCUSPARSE(hipsparseDestroySpMat(mmdata->matSpBDescr));
if (mmdata->matBDescr) PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matBDescr));
if (mmdata->matCDescr) PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matCDescr));
if (mmdata->spgemmDesc) PetscCallCUSPARSE(hipsparseSpGEMM_destroyDescr(mmdata->spgemmDesc));
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
if (mmdata->dBuffer4) PetscCallCUDA(hipFree(mmdata->dBuffer4));
if (mmdata->dBuffer5) PetscCallCUDA(hipFree(mmdata->dBuffer5));
#endif
if (mmdata->mmBuffer) PetscCallCUDA(hipFree(mmdata->mmBuffer));
if (mmdata->mmBuffer2) PetscCallCUDA(hipFree(mmdata->mmBuffer2));
#endif
PetscCall(MatDestroy(&mmdata->X));
PetscCall(PetscFree(data));
PetscFunctionReturn(PETSC_SUCCESS);
}
#include <../src/mat/impls/dense/seq/dense.h> // MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal()
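/* Numeric phase of C = op(A) op(B) with sparse A and dense B, computed with hipsparseSpMM (CUDA >= 11) or csrmm (older CUDA);
   for RARt/PtAP the sparse-dense product goes into the intermediate X and is finished with a dense GEMM */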
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A, B;
PetscInt m, n, blda, clda;
PetscBool flg, biscuda;
Mat_SeqAIJCUSPARSE *cusp;
hipsparseStatus_t stat;
hipsparseOperation_t opA;
const PetscScalar *barray;
PetscScalar *carray;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSEMultStruct *mat;
CsrMatrix *csrmat;
PetscFunctionBegin;
MatCheckProduct(C, 1);
PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty");
mmdata = (MatMatCusparse *)product->data;
A = product->A;
B = product->B;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
  /* Currently CopyToGpu does not copy if the matrix is bound to the CPU.
     Instead of silently accepting the wrong answer, I prefer to raise the error */
PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_PtAP:
mat = cusp->mat;
opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
if (!A->form_explicit_transpose) {
mat = cusp->mat;
opA = HIPSPARSE_OPERATION_TRANSPOSE;
} else {
PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
mat = cusp->matTranspose;
opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
}
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
case MATPRODUCT_RARt:
mat = cusp->mat;
opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
}
PetscCheck(mat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing Mat_SeqAIJCUSPARSEMultStruct");
csrmat = (CsrMatrix *)mat->mat;
/* if the user passed a CPU matrix, copy the data to the GPU */
PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQDENSECUDA, &biscuda));
if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSECUDA, MAT_INPLACE_MATRIX, &B));
PetscCall(MatDenseGetArrayReadAndMemType(B, &barray, nullptr));
PetscCall(MatDenseGetLDA(B, &blda));
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
PetscCall(MatDenseGetArrayWriteAndMemType(mmdata->X, &carray, nullptr));
PetscCall(MatDenseGetLDA(mmdata->X, &clda));
} else {
PetscCall(MatDenseGetArrayWriteAndMemType(C, &carray, nullptr));
PetscCall(MatDenseGetLDA(C, &clda));
}
PetscCall(PetscLogGpuTimeBegin());
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
hipsparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* (re)allocate mmBuffer if not initialized or LDAs are different */
if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
size_t mmBufferSize;
if (mmdata->initialized && mmdata->Blda != blda) {
PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matBDescr));
mmdata->matBDescr = NULL;
}
if (!mmdata->matBDescr) {
PetscCallCUSPARSE(hipsparseCreateDnMat(&mmdata->matBDescr, B->rmap->n, B->cmap->n, blda, (void *)barray, cusparse_scalartype, HIPSPARSE_ORDER_COL));
mmdata->Blda = blda;
}
if (mmdata->initialized && mmdata->Clda != clda) {
PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matCDescr));
mmdata->matCDescr = NULL;
}
if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
PetscCallCUSPARSE(hipsparseCreateDnMat(&mmdata->matCDescr, m, n, clda, (void *)carray, cusparse_scalartype, HIPSPARSE_ORDER_COL));
mmdata->Clda = clda;
}
if (!mat->matDescr) {
stat = hipsparseCreateCsr(&mat->matDescr, csrmat->num_rows, csrmat->num_cols, csrmat->num_entries, csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), csrmat->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
}
stat = hipsparseSpMM_bufferSize(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, &mmBufferSize);
PetscCallCUSPARSE(stat);
if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
PetscCallCUDA(hipFree(mmdata->mmBuffer));
PetscCallCUDA(hipMalloc(&mmdata->mmBuffer, mmBufferSize));
mmdata->mmBufferSize = mmBufferSize;
}
mmdata->initialized = PETSC_TRUE;
} else {
/* to be safe, always update pointers of the mats */
PetscCallCUSPARSE(hipsparseSpMatSetValues(mat->matDescr, csrmat->values->data().get()));
PetscCallCUSPARSE(hipsparseDnMatSetValues(mmdata->matBDescr, (void *)barray));
PetscCallCUSPARSE(hipsparseDnMatSetValues(mmdata->matCDescr, (void *)carray));
}
/* do hipsparseSpMM, which supports transpose on B */
stat = hipsparseSpMM(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, mmdata->mmBuffer);
PetscCallCUSPARSE(stat);
#else
PetscInt k;
/* cusparseXcsrmm does not support transpose on B */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
hipblasHandle_t cublasv2handle;
hipblasStatus_t cerr;
PetscCall(PetscCUBLASGetHandle(&cublasv2handle));
cerr = cublasXgeam(cublasv2handle, HIPBLAS_OP_T, HIPBLAS_OP_T, B->cmap->n, B->rmap->n, &PETSC_CUSPARSE_ONE, barray, blda, &PETSC_CUSPARSE_ZERO, barray, blda, mmdata->Bt, B->cmap->n);
PetscCallCUBLAS(cerr);
blda = B->cmap->n;
k = B->cmap->n;
} else {
k = B->rmap->n;
}
/* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
stat = cusparse_csr_spmm(cusp->handle, opA, m, n, k, csrmat->num_entries, mat->alpha_one, mat->descr, csrmat->values->data().get(), csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), mmdata->Bt ? mmdata->Bt : barray, blda, mat->beta_zero, carray, clda);
PetscCallCUSPARSE(stat);
#endif
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(n * 2.0 * csrmat->num_entries));
PetscCall(MatDenseRestoreArrayReadAndMemType(B, &barray));
if (product->type == MATPRODUCT_RARt) {
PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray));
PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_FALSE, PETSC_FALSE));
} else if (product->type == MATPRODUCT_PtAP) {
PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray));
PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_TRUE, PETSC_FALSE));
} else {
PetscCall(MatDenseRestoreArrayWriteAndMemType(C, &carray));
}
if (mmdata->cisdense) PetscCall(MatConvert(C, MATSEQDENSE, MAT_INPLACE_MATRIX, &C));
if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSE, MAT_INPLACE_MATRIX, &B));
PetscFunctionReturn(PETSC_SUCCESS);
}
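/* Symbolic phase for sparse-dense products: set the sizes and (CUDA dense) type of C and create the MatMatCusparse
   product data, including the intermediate dense matrix X needed by RARt/PtAP */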
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A, B;
PetscInt m, n;
PetscBool cisdense, flg;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSE *cusp;
PetscFunctionBegin;
MatCheckProduct(C, 1);
PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty");
A = product->A;
B = product->B;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscCheck(cusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
switch (product->type) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
break;
case MATPRODUCT_PtAP:
m = B->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_RARt:
m = B->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
}
PetscCall(MatSetSizes(C, m, n, m, n));
  /* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy the result back to the CPU */
PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQDENSE, &cisdense));
PetscCall(MatSetType(C, MATSEQDENSECUDA));
/* product data */
PetscCall(PetscNew(&mmdata));
mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11, 0, 0)
/* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) PetscCallCUDA(hipMalloc((void **)&mmdata->Bt, (size_t)B->rmap->n * (size_t)B->cmap->n * sizeof(PetscScalar)));
#endif
/* for these products we need intermediate storage */
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
PetscCall(MatCreate(PetscObjectComm((PetscObject)C), &mmdata->X));
PetscCall(MatSetType(mmdata->X, MATSEQDENSECUDA));
if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
PetscCall(MatSetSizes(mmdata->X, A->rmap->n, B->rmap->n, A->rmap->n, B->rmap->n));
} else {
PetscCall(MatSetSizes(mmdata->X, A->rmap->n, B->cmap->n, A->rmap->n, B->cmap->n));
}
}
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
PetscFunctionReturn(PETSC_SUCCESS);
}
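/* Numeric phase of sparse-sparse products: recompute the values of C on the GPU, reusing the SpGEMM descriptors and
   buffers created in the symbolic phase where available, then finish with a shortened version of MatAssemblyEnd_SeqAIJ */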
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A, B;
Mat_SeqAIJCUSPARSE *Acusp, *Bcusp, *Ccusp;
Mat_SeqAIJ *c = (Mat_SeqAIJ *)C->data;
Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat;
CsrMatrix *Acsr, *Bcsr, *Ccsr;
PetscBool flg;
hipsparseStatus_t stat;
MatProductType ptype;
MatMatCusparse *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
hipsparseSpMatDescr_t BmatSpDescr;
#endif
hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE, opB = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */
PetscFunctionBegin;
MatCheckProduct(C, 1);
PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty");
PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for C of type %s", ((PetscObject)C)->type_name);
mmdata = (MatMatCusparse *)C->product->data;
A = product->A;
B = product->B;
  if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have already been computed in the MatProductSymbolic phase */
mmdata->reusesym = PETSC_FALSE;
Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr;
PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
Cmat = Ccusp->mat;
PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[C->product->type]);
Ccsr = (CsrMatrix *)Cmat->mat;
PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct");
goto finalize;
}
if (!c->nz) goto finalize;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name);
PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
PetscCheck(!B->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr;
Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr;
PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));
ptype = product->type;
if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) {
ptype = MATPRODUCT_AB;
PetscCheck(product->symbolic_used_the_fact_A_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic should have been built using the fact that A is symmetric");
}
if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) {
ptype = MATPRODUCT_AB;
PetscCheck(product->symbolic_used_the_fact_B_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic should have been built using the fact that B is symmetric");
}
switch (ptype) {
case MATPRODUCT_AB:
Amat = Acusp->mat;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_AtB:
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_ABt:
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
break;
default:
SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
}
Cmat = Ccusp->mat;
PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]);
PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]);
PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[ptype]);
Acsr = (CsrMatrix *)Amat->mat;
Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix *)Bmat->mat; /* B may be in compressed row storage */
Ccsr = (CsrMatrix *)Cmat->mat;
PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct");
PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct");
PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct");
PetscCall(PetscLogGpuTimeBegin());
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE));
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
PetscCallCUSPARSE(stat);
#else
stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);
PetscCallCUSPARSE(stat);
stat = hipsparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
PetscCallCUSPARSE(stat);
#endif
#else
stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries,
Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());
PetscCallCUSPARSE(stat);
#endif
PetscCall(PetscLogGpuFlops(mmdata->flops));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogGpuTimeEnd());
C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
/* shorter version of MatAssemblyEnd_SeqAIJ */
PetscCall(PetscInfo(C, "Matrix size: %" PetscInt_FMT " X %" PetscInt_FMT "; storage space: 0 unneeded,%" PetscInt_FMT " used\n", C->rmap->n, C->cmap->n, c->nz));
PetscCall(PetscInfo(C, "Number of mallocs during MatSetValues() is 0\n"));
PetscCall(PetscInfo(C, "Maximum nonzeros in any row is %" PetscInt_FMT "\n", c->rmax));
c->reallocs = 0;
C->info.mallocs += 0;
C->info.nz_unneeded = 0;
C->assembled = C->was_assembled = PETSC_TRUE;
C->num_ass++;
PetscFunctionReturn(PETSC_SUCCESS);
}
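/* Symbolic phase of sparse-sparse products: compute the nonzero pattern of C on the GPU with SpGEMM
   (SpGEMMreuse for CUDA >= 11.4, csrgemm for CUDA < 11) and mirror the row offsets and column indices on the host */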
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A, B;
Mat_SeqAIJCUSPARSE *Acusp, *Bcusp, *Ccusp;
Mat_SeqAIJ *a, *b, *c;
Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat;
CsrMatrix *Acsr, *Bcsr, *Ccsr;
PetscInt i, j, m, n, k;
PetscBool flg;
hipsparseStatus_t stat;
MatProductType ptype;
MatMatCusparse *mmdata;
PetscLogDouble flops;
PetscBool biscompressed, ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
int64_t C_num_rows1, C_num_cols1, C_nnz1;
hipsparseSpMatDescr_t BmatSpDescr;
#else
int cnz;
#endif
hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE, opB = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */
PetscFunctionBegin;
MatCheckProduct(C, 1);
PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty");
A = product->A;
B = product->B;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name);
a = (Mat_SeqAIJ *)A->data;
b = (Mat_SeqAIJ *)B->data;
/* product data */
PetscCall(PetscNew(&mmdata));
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));
Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; /* Access spptr after MatSeqAIJCUSPARSECopyToGPU, not before */
Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr;
PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
ptype = product->type;
if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) {
ptype = MATPRODUCT_AB;
product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE;
}
if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) {
ptype = MATPRODUCT_AB;
product->symbolic_used_the_fact_B_is_symmetric = PETSC_TRUE;
}
biscompressed = PETSC_FALSE;
ciscompressed = PETSC_FALSE;
switch (ptype) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
k = A->cmap->n;
Amat = Acusp->mat;
Bmat = Bcusp->mat;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
k = A->rmap->n;
PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
k = A->cmap->n;
PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B));
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
break;
default:
SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
}
/* create cusparse matrix */
PetscCall(MatSetSizes(C, m, n, m, n));
PetscCall(MatSetType(C, MATSEQAIJCUSPARSE));
c = (Mat_SeqAIJ *)C->data;
Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
c->compressedrow.use = ciscompressed;
  if (c->compressedrow.use) { /* if a is in compressed row format, then c will be in compressed row format too */
c->compressedrow.nrows = a->compressedrow.nrows;
PetscCall(PetscMalloc2(c->compressedrow.nrows + 1, &c->compressedrow.i, c->compressedrow.nrows, &c->compressedrow.rindex));
PetscCall(PetscArraycpy(c->compressedrow.rindex, a->compressedrow.rindex, c->compressedrow.nrows));
Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices->assign(c->compressedrow.rindex, c->compressedrow.rindex + c->compressedrow.nrows);
} else {
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Cmat->cprowIndices = NULL;
}
Ccusp->nrows = ciscompressed ? c->compressedrow.nrows : m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = Ccusp->nrows;
Ccsr->num_cols = n;
Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows + 1);
PetscCallCUSPARSE(hipsparseCreateMatDescr(&Cmat->descr));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
PetscCallCUDA(hipMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar)));
PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar)));
PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar)));
PetscCallCUDA(hipMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
  if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cuSPARSE raises errors in different calls when matrices have zero rows/columns! */
PetscCallThrust(thrust::fill(thrust::device, Ccsr->row_offsets->begin(), Ccsr->row_offsets->end(), 0));
c->nz = 0;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
goto finalizesym;
}
PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]);
PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]);
Acsr = (CsrMatrix *)Amat->mat;
if (!biscompressed) {
Bcsr = (CsrMatrix *)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
BmatSpDescr = Bmat->matDescr;
#endif
} else { /* we need to use row offsets for the full matrix */
CsrMatrix *cBcsr = (CsrMatrix *)Bmat->mat;
Bcsr = new CsrMatrix;
Bcsr->num_rows = B->rmap->n;
Bcsr->num_cols = cBcsr->num_cols;
Bcsr->num_entries = cBcsr->num_entries;
Bcsr->column_indices = cBcsr->column_indices;
Bcsr->values = cBcsr->values;
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1);
PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt)));
}
Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
mmdata->Bcsr = Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
if (Bcsr->num_rows && Bcsr->num_cols) {
stat = hipsparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Bcsr->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
}
BmatSpDescr = mmdata->matSpBDescr;
#endif
}
PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct");
PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct");
/* precompute flops count */
if (ptype == MATPRODUCT_AB) {
for (i = 0, flops = 0; i < A->rmap->n; i++) {
const PetscInt st = a->i[i];
const PetscInt en = a->i[i + 1];
for (j = st; j < en; j++) {
const PetscInt brow = a->j[j];
flops += 2. * (b->i[brow + 1] - b->i[brow]);
}
}
} else if (ptype == MATPRODUCT_AtB) {
for (i = 0, flops = 0; i < A->rmap->n; i++) {
const PetscInt anzi = a->i[i + 1] - a->i[i];
const PetscInt bnzi = b->i[i + 1] - b->i[i];
flops += (2. * anzi) * bnzi;
}
} else { /* TODO */
flops = 0.;
}
mmdata->flops = flops;
PetscCall(PetscLogGpuTimeBegin());
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE));
stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0, NULL, NULL, NULL, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
PetscCallCUSPARSE(hipsparseSpGEMM_createDescr(&mmdata->spgemmDesc));
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
{
/* cusparseSpGEMMreuse has more reasonable APIs than cusparseSpGEMM, so we prefer to use it.
We follow the sample code at https://github.com/NVIDIA/CUDALibrarySamples/blob/master/cuSPARSE/spgemm_reuse
*/
void *dBuffer1 = NULL;
void *dBuffer2 = NULL;
void *dBuffer3 = NULL;
/* dBuffer4, dBuffer5 are needed by cusparseSpGEMMreuse_compute, and therefore are stored in mmdata */
size_t bufferSize1 = 0;
size_t bufferSize2 = 0;
size_t bufferSize3 = 0;
size_t bufferSize4 = 0;
size_t bufferSize5 = 0;
/* ask bufferSize1 bytes for external memory */
stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, NULL);
PetscCallCUSPARSE(stat);
PetscCallCUDA(hipMalloc((void **)&dBuffer1, bufferSize1));
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, dBuffer1);
PetscCallCUSPARSE(stat);
stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, NULL, &bufferSize3, NULL, &bufferSize4, NULL);
PetscCallCUSPARSE(stat);
PetscCallCUDA(hipMalloc((void **)&dBuffer2, bufferSize2));
PetscCallCUDA(hipMalloc((void **)&dBuffer3, bufferSize3));
PetscCallCUDA(hipMalloc((void **)&mmdata->dBuffer4, bufferSize4));
stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, dBuffer2, &bufferSize3, dBuffer3, &bufferSize4, mmdata->dBuffer4);
PetscCallCUSPARSE(stat);
PetscCallCUDA(hipFree(dBuffer1));
PetscCallCUDA(hipFree(dBuffer2));
/* get matrix C non-zero entries C_nnz1 */
PetscCallCUSPARSE(hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1));
c->nz = (PetscInt)C_nnz1;
/* allocate matrix C */
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
/* update matC with the new pointers */
stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get());
PetscCallCUSPARSE(stat);
stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, NULL);
PetscCallCUSPARSE(stat);
PetscCallCUDA(hipMalloc((void **)&mmdata->dBuffer5, bufferSize5));
stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, mmdata->dBuffer5);
PetscCallCUSPARSE(stat);
PetscCallCUDA(hipFree(dBuffer3));
stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
PetscCallCUSPARSE(stat);
PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufferSize4 / 1024, bufferSize5 / 1024));
}
#else
size_t bufSize2;
/* ask bufferSize bytes for external memory */
stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, NULL);
PetscCallCUSPARSE(stat);
PetscCallCUDA(hipMalloc((void **)&mmdata->mmBuffer2, bufSize2));
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);
PetscCallCUSPARSE(stat);
  /* ask again for bufferSize bytes of external memory */
stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);
PetscCallCUSPARSE(stat);
  /* The cuSPARSE documentation is not clear, nor is the API.
     We need both buffers to perform the operations properly!
     mmdata->mmBuffer2 does not appear anywhere in the compute/copy API;
     it only appears in the workEstimation step, but it seems to be needed in compute, so probably its address
     is stored in the descriptor! What a messy API... */
PetscCallCUDA(hipMalloc((void **)&mmdata->mmBuffer, mmdata->mmBufferSize));
/* compute the intermediate product of A * B */
stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);
PetscCallCUSPARSE(stat);
/* get matrix C non-zero entries C_nnz1 */
PetscCallCUSPARSE(hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1));
c->nz = (PetscInt)C_nnz1;
PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufSize2 / 1024,
mmdata->mmBufferSize / 1024));
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get());
PetscCallCUSPARSE(stat);
stat = hipsparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
PetscCallCUSPARSE(stat);
#endif // PETSC_PKG_CUDA_VERSION_GE(11,4,0)
#else
PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_HOST));
stat = hipsparseXcsrgemmNnz(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries,
Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);
PetscCallCUSPARSE(stat);
c->nz = cnz;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE));
  /* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only.
     I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows doing the symbolic phase by passing NULL for values, but it seems quite buggy when
     D is NULL, despite the fact that the cuSPARSE documentation claims it is supported! */
stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries,
Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());
PetscCallCUSPARSE(stat);
#endif
PetscCall(PetscLogGpuFlops(mmdata->flops));
PetscCall(PetscLogGpuTimeEnd());
finalizesym:
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
PetscCall(PetscMalloc1(m + 1, &c->i));
PetscCall(PetscMalloc1(c->nz, &c->j));
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
PetscInt *d_i = c->i;
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
if (ciscompressed) d_i = c->compressedrow.i;
PetscCallCUDA(hipMemcpy(d_i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
PetscCallCUDA(hipMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
} else {
PetscInt *d_i = c->i;
if (ciscompressed) d_i = c->compressedrow.i;
PetscCallCUDA(hipMemcpy(d_i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
PetscCallCUDA(hipMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
}
if (ciscompressed) { /* need to expand host row offsets */
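/* Illustrative (hypothetical) example of the expansion below: with m = 4, compressedrow.nrows = 2,
compressedrow.rindex = [1,3] and compressedrow.i = [0,2,5], the loop produces the full offsets c->i = [0,0,2,2,5] */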
PetscInt r = 0;
c->i[0] = 0;
for (k = 0; k < c->compressedrow.nrows; k++) {
const PetscInt next = c->compressedrow.rindex[k];
const PetscInt old = c->compressedrow.i[k];
for (; r < next; r++) c->i[r + 1] = old;
}
for (; r < m; r++) c->i[r + 1] = c->compressedrow.i[c->compressedrow.nrows];
}
PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt)));
PetscCall(PetscMalloc1(m, &c->ilen));
PetscCall(PetscMalloc1(m, &c->imax));
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (k = 0; k < m; k++) {
const PetscInt nn = c->i[k + 1] - c->i[k];
c->ilen[k] = c->imax[k] = nn;
c->nonzerorowcnt += (PetscInt) !!nn;
c->rmax = PetscMax(c->rmax, nn);
}
PetscCall(MatMarkDiagonal_SeqAIJ(C));
PetscCall(PetscMalloc1(c->nz, &c->a));
Ccsr->num_entries = c->nz;
C->nonzerostate++;
PetscCall(PetscLayoutSetUp(C->rmap));
PetscCall(PetscLayoutSetUp(C->cmap));
Ccusp->nonzerostate = C->nonzerostate;
C->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
C->preallocated = PETSC_TRUE;
C->assembled = PETSC_FALSE;
C->was_assembled = PETSC_FALSE;
if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */
mmdata->reusesym = PETSC_TRUE;
C->offloadmask = PETSC_OFFLOAD_GPU;
}
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);
/* handles sparse or dense B */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
Mat_Product *product = mat->product;
PetscBool isdense = PETSC_FALSE, Biscusp = PETSC_FALSE, Ciscusp = PETSC_TRUE;
PetscFunctionBegin;
MatCheckProduct(mat, 1);
PetscCall(PetscObjectBaseTypeCompare((PetscObject)product->B, MATSEQDENSE, &isdense));
if (!product->A->boundtocpu && !product->B->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->B, MATSEQAIJCUSPARSE, &Biscusp));
if (product->type == MATPRODUCT_ABC) {
Ciscusp = PETSC_FALSE;
if (!product->C->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->C, MATSEQAIJCUSPARSE, &Ciscusp));
}
if (Biscusp && Ciscusp) { /* we can always select the CPU backend */
PetscBool usecpu = PETSC_FALSE;
switch (product->type) {
case MATPRODUCT_AB:
if (product->api_user) {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMult", "Mat");
PetscCall(PetscOptionsBool("-matmatmult_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
} else {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AB", "Mat");
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
}
break;
case MATPRODUCT_AtB:
if (product->api_user) {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatTransposeMatMult", "Mat");
PetscCall(PetscOptionsBool("-mattransposematmult_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
} else {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AtB", "Mat");
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
}
break;
case MATPRODUCT_PtAP:
if (product->api_user) {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatPtAP", "Mat");
PetscCall(PetscOptionsBool("-matptap_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL));
PetscOptionsEnd();
} else {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_PtAP", "Mat");
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL));
PetscOptionsEnd();
}
break;
case MATPRODUCT_RARt:
if (product->api_user) {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatRARt", "Mat");
PetscCall(PetscOptionsBool("-matrart_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL));
PetscOptionsEnd();
} else {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_RARt", "Mat");
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL));
PetscOptionsEnd();
}
break;
case MATPRODUCT_ABC:
if (product->api_user) {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMatMult", "Mat");
PetscCall(PetscOptionsBool("-matmatmatmult_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
} else {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_ABC", "Mat");
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
}
break;
default:
break;
}
if (usecpu) Biscusp = Ciscusp = PETSC_FALSE;
}
/* dispatch */
if (isdense) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
if (product->A->boundtocpu) {
PetscCall(MatProductSetFromOptions_SeqAIJ_SeqDense(mat));
} else {
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
}
break;
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else if (Biscusp && Ciscusp) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
break;
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else { /* fallback for AIJ */
PetscCall(MatProductSetFromOptions_SeqAIJ(mat));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_FALSE, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_FALSE, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_TRUE));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_TRUE));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
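/* Device helper used by MatMultAddKernel_SeqAIJCUSPARSE() below: performs y[idx[i]] += x[i] for 0 <= i < n,
i.e. scatter-adds the compressed work vector back into the full-length result vector */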
__global__ static void ScatterAdd(PetscInt n, PetscInt *idx, const PetscScalar *x, PetscScalar *y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[idx[i]] += x[i];
}
/* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz, PetscBool trans, PetscBool herm)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct;
PetscScalar *xarray, *zarray, *dptr, *beta, *xptr;
hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
PetscBool compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscInt nx, ny;
#endif
PetscFunctionBegin;
PetscCheck(!herm || trans, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Hermitian and not transpose not supported");
if (!a->nz) {
if (yy) PetscCall(VecSeq_CUDA::Copy(yy, zz));
else PetscCall(VecSeq_CUDA::Set(zz, 0));
PetscFunctionReturn(PETSC_SUCCESS);
}
/* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
if (!trans) {
matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat;
PetscCheck(matstruct, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
} else {
if (herm || !A->form_explicit_transpose) {
opA = herm ? HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE;
matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat;
} else {
if (!cusparsestruct->matTranspose) PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose;
}
}
/* Does the matrix use compressed rows (i.e., drop zero rows)? */
compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;
try {
PetscCall(VecCUDAGetArrayRead(xx, (const PetscScalar **)&xarray));
if (yy == zz) PetscCall(VecCUDAGetArray(zz, &zarray)); /* read & write zz, so need to get up-to-date zarray on GPU */
else PetscCall(VecCUDAGetArrayWrite(zz, &zarray)); /* write zz, so no need to init zarray on GPU */
PetscCall(PetscLogGpuTimeBegin());
if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
/* z = A x + beta y.
If A is compressed (with less rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
*/
xptr = xarray;
dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
/* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is
allocated to accommodate different uses. So we get the length info directly from mat.
*/
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix *)matstruct->mat;
nx = mat->num_cols;
ny = mat->num_rows;
}
#endif
} else {
/* z = A^T x + beta y
If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
Note A^Tx is of full length, so we set beta to 1.0 if y exists.
*/
xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
dptr = zarray;
beta = yy ? matstruct->beta_one : matstruct->beta_zero;
if (compressed) { /* Scatter x to work vector */
thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
thrust::for_each(
#if PetscDefined(HAVE_THRUST_ASYNC)
thrust::hip::par.on(PetscDefaultCudaStream),
#endif
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAEqualsReverse());
}
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix *)matstruct->mat;
nx = mat->num_rows;
ny = mat->num_cols;
}
#endif
}
/* csr_spmv does y = alpha op(A) x + beta y */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCheck(opA >= 0 && opA <= 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE ABI on hipsparseOperation_t has changed and PETSc has not been updated accordingly");
if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
PetscCallCUSPARSE(hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr, nx, xptr, cusparse_scalartype));
PetscCallCUSPARSE(hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr, ny, dptr, cusparse_scalartype));
PetscCallCUSPARSE(
hipsparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, &matstruct->cuSpMV[opA].spmvBufferSize));
PetscCallCUDA(hipMalloc(&matstruct->cuSpMV[opA].spmvBuffer, matstruct->cuSpMV[opA].spmvBufferSize));
matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
} else {
/* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
PetscCallCUSPARSE(hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr, xptr));
PetscCallCUSPARSE(hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr, dptr));
}
PetscCallCUSPARSE(hipsparseSpMV(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTranspose() */
matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, matstruct->cuSpMV[opA].spmvBuffer));
#else
CsrMatrix *mat = (CsrMatrix *)matstruct->mat;
PetscCallCUSPARSE(cusparse_csr_spmv(cusparsestruct->handle, opA, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha_one, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xptr, beta, dptr));
#endif
} else {
if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
PetscCallCUSPARSE(cusparse_hyb_spmv(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->descr, hybMat, xptr, beta, dptr));
#endif
}
}
PetscCall(PetscLogGpuTimeEnd());
if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
if (yy) { /* MatMultAdd: zz = A*xx + yy */
if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
PetscCall(VecSeq_CUDA::Copy(yy, zz)); /* zz = yy */
} else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */
}
} else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
PetscCall(VecSeq_CUDA::Set(zz, 0));
}
/* ScatterAdd the result from work vector into the full vector when A is compressed */
if (compressed) {
PetscCall(PetscLogGpuTimeBegin());
/* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registered)
and in the destructor of the scope, it will call hipStreamSynchronize() on this stream. One has to store all events to
prevent that. So I just added a ScatterAdd kernel.
*/
#if 0
thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
thrust::async::for_each(thrust::hip::par.on(cusparsestruct->stream),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAPlusEquals());
#else
PetscInt n = matstruct->cprowIndices->size();
hipLaunchKernelGGL(( ScatterAdd), dim3((n + 255) / 256), dim3(256), 0, PetscDefaultCudaStream, n, matstruct->cprowIndices->data().get(), cusparsestruct->workVector->data().get(), zarray);
#endif
PetscCall(PetscLogGpuTimeEnd());
}
} else {
if (yy && yy != zz) PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */
}
PetscCall(VecCUDARestoreArrayRead(xx, (const PetscScalar **)&xarray));
if (yy == zz) PetscCall(VecCUDARestoreArray(zz, &zarray));
else PetscCall(VecCUDARestoreArrayWrite(zz, &zarray));
} catch (char *ex) {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
}
if (yy) {
PetscCall(PetscLogGpuFlops(2.0 * a->nz));
} else {
PetscCall(PetscLogGpuFlops(2.0 * a->nz - a->nonzerorowcnt));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A, MatAssemblyType mode)
{
PetscObjectState onnz = A->nonzerostate;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscFunctionBegin;
PetscCall(MatAssemblyEnd_SeqAIJ(A, mode));
if (onnz != A->nonzerostate && cusp->deviceMat) {
PetscCall(PetscInfo(A, "Destroy device mat since nonzerostate changed\n"));
PetscCallCUDA(hipFree(cusp->deviceMat));
cusp->deviceMat = NULL;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in `MATAIJCUSPARSE` (compressed row) format
(the default sequential PETSc format). This matrix will ultimately be pushed down
to NVIDIA GPUs and use the CuSPARSE library for calculations. For good matrix
assembly performance the user should preallocate the matrix storage by setting
the parameter `nz` (or the array `nnz`).
Collective
Input Parameters:
+ comm - MPI communicator, set to `PETSC_COMM_SELF`
. m - number of rows
. n - number of columns
. nz - number of nonzeros per row (same for all rows), ignored if `nnz` is provided
- nnz - array containing the number of nonzeros in the various rows (possibly different for each row) or `NULL`
Output Parameter:
. A - the matrix
Level: intermediate
Notes:
It is recommended that one use the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`,
MatXXXXSetPreallocation() paradigm instead of this routine directly.
[MatXXXXSetPreallocation() is, for example, `MatSeqAIJSetPreallocation()`]
The AIJ format, also called
compressed row storage, is fully compatible with standard Fortran
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero.
Specify the preallocated storage with either nz or nnz (not both).
Set `nz` = `PETSC_DEFAULT` and `nnz` = `NULL` for PETSc to control dynamic memory
allocation.
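Example Usage:
A minimal sketch (the sizes and the preallocation value below are arbitrary placeholders):
.vb
Mat A;
PetscCall(MatCreateSeqAIJCUSPARSE(PETSC_COMM_SELF, 100, 100, 5, NULL, &A));
// set entries with MatSetValues(), then assemble as usual
PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
PetscCall(MatDestroy(&A));
.ve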
.seealso: [](chapter_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `MatCreate()`, `MatCreateAIJ()`, `MatSetValues()`, `MatSeqAIJSetColumnIndices()`, `MatCreateSeqAIJWithArrays()`, `MatCreateAIJ()`, `MATSEQAIJCUSPARSE`, `MATAIJCUSPARSE`
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt nz, const PetscInt nnz[], Mat *A)
{
PetscFunctionBegin;
PetscCall(MatCreate(comm, A));
PetscCall(MatSetSizes(*A, m, n, m, n));
PetscCall(MatSetType(*A, MATSEQAIJCUSPARSE));
PetscCall(MatSeqAIJSetPreallocation_SeqAIJ(*A, nz, (PetscInt *)nnz));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
PetscCall(MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE **)&A->spptr));
} else {
PetscCall(MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors **)&A->spptr));
}
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetFormat_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetUseCPUSolve_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorGetSolverType_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatConvert_seqaijcusparse_hypre_C", NULL));
PetscCall(MatDestroy_SeqAIJ(A));
PetscFunctionReturn(PETSC_SUCCESS);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat, MatType, MatReuse, Mat *);
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat, PetscBool);
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A, MatDuplicateOption cpvalues, Mat *B)
{
PetscFunctionBegin;
PetscCall(MatDuplicate_SeqAIJ(A, cpvalues, B));
PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, B));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y, PetscScalar a, Mat X, MatStructure str)
{
Mat_SeqAIJ *x = (Mat_SeqAIJ *)X->data, *y = (Mat_SeqAIJ *)Y->data;
Mat_SeqAIJCUSPARSE *cy;
Mat_SeqAIJCUSPARSE *cx;
PetscScalar *ay;
const PetscScalar *ax;
CsrMatrix *csry, *csrx;
PetscFunctionBegin;
cy = (Mat_SeqAIJCUSPARSE *)Y->spptr;
cx = (Mat_SeqAIJCUSPARSE *)X->spptr;
if (X->ops->axpy != Y->ops->axpy) {
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE));
PetscCall(MatAXPY_SeqAIJ(Y, a, X, str));
PetscFunctionReturn(PETSC_SUCCESS);
}
/* if we are here, it means both matrices are bound to GPU */
PetscCall(MatSeqAIJCUSPARSECopyToGPU(Y));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(X));
PetscCheck(cy->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)Y), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported");
PetscCheck(cx->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)X), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported");
csry = (CsrMatrix *)cy->mat->mat;
csrx = (CsrMatrix *)cx->mat->mat;
/* see if we can turn this into a cublas axpy */
if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
bool eq = thrust::equal(thrust::device, csry->row_offsets->begin(), csry->row_offsets->end(), csrx->row_offsets->begin());
if (eq) eq = thrust::equal(thrust::device, csry->column_indices->begin(), csry->column_indices->end(), csrx->column_indices->begin());
if (eq) str = SAME_NONZERO_PATTERN;
}
/* spgeam is buggy with one column */
if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN;
if (str == SUBSET_NONZERO_PATTERN) {
PetscScalar b = 1.0;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
size_t bufferSize;
void *buffer;
#endif
PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax));
PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay));
PetscCallCUSPARSE(hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_HOST));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCallCUSPARSE(cusparse_csr_spgeam_bufferSize(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(),
csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), &bufferSize));
PetscCallCUDA(hipMalloc(&buffer, bufferSize));
PetscCall(PetscLogGpuTimeBegin());
PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(),
csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), buffer));
PetscCall(PetscLogGpuFlops(x->nz + y->nz));
PetscCall(PetscLogGpuTimeEnd());
PetscCallCUDA(hipFree(buffer));
#else
PetscCall(PetscLogGpuTimeBegin());
PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(),
csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get()));
PetscCall(PetscLogGpuFlops(x->nz + y->nz));
PetscCall(PetscLogGpuTimeEnd());
#endif
PetscCallCUSPARSE(hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_DEVICE));
PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax));
PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay));
PetscCall(MatSeqAIJInvalidateDiagonal(Y));
} else if (str == SAME_NONZERO_PATTERN) {
hipblasHandle_t cublasv2handle;
PetscBLASInt one = 1, bnz = 1;
PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax));
PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay));
PetscCall(PetscCUBLASGetHandle(&cublasv2handle));
PetscCall(PetscBLASIntCast(x->nz, &bnz));
PetscCall(PetscLogGpuTimeBegin());
PetscCallCUBLAS(cublasXaxpy(cublasv2handle, bnz, &a, ax, one, ay, one));
PetscCall(PetscLogGpuFlops(2.0 * bnz));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax));
PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay));
PetscCall(MatSeqAIJInvalidateDiagonal(Y));
} else {
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE));
PetscCall(MatAXPY_SeqAIJ(Y, a, X, str));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y, PetscScalar a)
{
Mat_SeqAIJ *y = (Mat_SeqAIJ *)Y->data;
PetscScalar *ay;
hipblasHandle_t cublasv2handle;
PetscBLASInt one = 1, bnz = 1;
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay));
PetscCall(PetscCUBLASGetHandle(&cublasv2handle));
PetscCall(PetscBLASIntCast(y->nz, &bnz));
PetscCall(PetscLogGpuTimeBegin());
PetscCallCUBLAS(cublasXscal(cublasv2handle, bnz, &a, ay, one));
PetscCall(PetscLogGpuFlops(bnz));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay));
PetscCall(MatSeqAIJInvalidateDiagonal(Y));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
PetscBool both = PETSC_FALSE;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE *)A->spptr;
if (spptr->mat) {
CsrMatrix *matrix = (CsrMatrix *)spptr->mat->mat;
if (matrix->values) {
both = PETSC_TRUE;
thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.);
}
}
if (spptr->matTranspose) {
CsrMatrix *matrix = (CsrMatrix *)spptr->matTranspose->mat;
if (matrix->values) thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.);
}
}
PetscCall(PetscArrayzero(a->a, a->i[A->rmap->n]));
PetscCall(MatSeqAIJInvalidateDiagonal(A));
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
else A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A, PetscBool flg)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscFunctionBegin;
if (A->factortype != MAT_FACTOR_NONE) {
A->boundtocpu = flg;
PetscFunctionReturn(PETSC_SUCCESS);
}
if (flg) {
PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
A->ops->scale = MatScale_SeqAIJ;
A->ops->axpy = MatAXPY_SeqAIJ;
A->ops->zeroentries = MatZeroEntries_SeqAIJ;
A->ops->mult = MatMult_SeqAIJ;
A->ops->multadd = MatMultAdd_SeqAIJ;
A->ops->multtranspose = MatMultTranspose_SeqAIJ;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ;
A->ops->multhermitiantranspose = NULL;
A->ops->multhermitiantransposeadd = NULL;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ;
PetscCall(PetscMemzero(a->ops, sizeof(Mat_SeqAIJOps)));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL));
} else {
A->ops->scale = MatScale_SeqAIJCUSPARSE;
A->ops->axpy = MatAXPY_SeqAIJCUSPARSE;
A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE;
A->ops->mult = MatMult_SeqAIJCUSPARSE;
A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE;
A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;
A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE;
A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE;
a->ops->getarray = MatSeqAIJGetArray_SeqAIJCUSPARSE;
a->ops->restorearray = MatSeqAIJRestoreArray_SeqAIJCUSPARSE;
a->ops->getarrayread = MatSeqAIJGetArrayRead_SeqAIJCUSPARSE;
a->ops->restorearrayread = MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE;
a->ops->getarraywrite = MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE;
a->ops->restorearraywrite = MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE;
a->ops->getcsrandmemtype = MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE;
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", MatSeqAIJCopySubArray_SeqAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", MatProductSetFromOptions_SeqAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", MatProductSetFromOptions_SeqAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", MatSetPreallocationCOO_SeqAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", MatSetValuesCOO_SeqAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", MatProductSetFromOptions_SeqAIJCUSPARSE));
}
A->boundtocpu = flg;
if (flg && a->inode.size) {
a->inode.use = PETSC_TRUE;
} else {
a->inode.use = PETSC_FALSE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType, MatReuse reuse, Mat *newmat)
{
Mat B;
PetscFunctionBegin;
PetscCall(PetscDeviceInitialize(PETSC_DEVICE_CUDA)); /* first use of CUSPARSE may be via MatConvert */
if (reuse == MAT_INITIAL_MATRIX) {
PetscCall(MatDuplicate(A, MAT_COPY_VALUES, newmat));
} else if (reuse == MAT_REUSE_MATRIX) {
PetscCall(MatCopy(A, *newmat, SAME_NONZERO_PATTERN));
}
B = *newmat;
PetscCall(PetscFree(B->defaultvectype));
PetscCall(PetscStrallocpy(VECCUDA, &B->defaultvectype));
if (reuse != MAT_REUSE_MATRIX && !B->spptr) {
if (B->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr;
PetscCall(PetscNew(&spptr));
PetscCallCUSPARSE(hipsparseCreate(&spptr->handle));
PetscCallCUSPARSE(hipsparseSetStream(spptr->handle, PetscDefaultCudaStream));
spptr->format = MAT_CUSPARSE_CSR;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
spptr->spmvAlg = CUSPARSE_SPMV_CSR_ALG1; /* default, since we only support csr */
#else
spptr->spmvAlg = HIPSPARSE_CSRMV_ALG1; /* default, since we only support csr */
#endif
spptr->spmmAlg = HIPSPARSE_CSRMM_ALG1; /* default, only support column-major dense matrix B */
spptr->csr2cscAlg = HIPSPARSE_CSR2CSC_ALG1;
#endif
B->spptr = spptr;
} else {
Mat_SeqAIJCUSPARSETriFactors *spptr;
PetscCall(PetscNew(&spptr));
PetscCallCUSPARSE(hipsparseCreate(&spptr->handle));
PetscCallCUSPARSE(hipsparseSetStream(spptr->handle, PetscDefaultCudaStream));
B->spptr = spptr;
}
B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE;
B->ops->destroy = MatDestroy_SeqAIJCUSPARSE;
B->ops->setoption = MatSetOption_SeqAIJCUSPARSE;
B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE;
B->ops->bindtocpu = MatBindToCPU_SeqAIJCUSPARSE;
B->ops->duplicate = MatDuplicate_SeqAIJCUSPARSE;
PetscCall(MatBindToCPU_SeqAIJCUSPARSE(B, PETSC_FALSE));
PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATSEQAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE));
#if defined(PETSC_HAVE_HYPRE)
PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_seqaijcusparse_hypre_C", MatConvert_AIJ_HYPRE));
#endif
PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetUseCPUSolve_C", MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
PetscFunctionBegin;
PetscCall(MatCreate_SeqAIJ(B));
PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, &B));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*MC
MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.
A matrix type whose data resides on NVIDIA GPUs. These matrices can be stored in
CSR, ELL, or Hybrid format (ELL and Hybrid are only available with CUDA versions before 11.0).
All matrix calculations are performed on NVIDIA GPUs using the CuSPARSE library.
Options Database Keys:
+ -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to `MatSetFromOptions()`
. -mat_cusparse_storage_format csr - sets the storage format of matrices (for `MatMult()` and factors in `MatSolve()`).
Other options include ell (ellpack) or hyb (hybrid).
. -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for `MatMult()`). Other options include ell (ellpack) or hyb (hybrid).
- -mat_cusparse_use_cpu_solve - Do `MatSolve()` on CPU
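Example Usage:
A minimal sketch that selects this type at runtime (n is a placeholder size):
.vb
Mat A;
PetscCall(MatCreate(PETSC_COMM_SELF, &A));
PetscCall(MatSetSizes(A, n, n, n, n));
PetscCall(MatSetType(A, MATSEQAIJCUSPARSE));
PetscCall(MatSetFromOptions(A));
.ve
Alternatively, pass -mat_type aijcusparse on the command line together with `MatSetFromOptions()`.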
Level: beginner
.seealso: [](chapter_matrices), `Mat`, `MatCreateSeqAIJCUSPARSE()`, `MatCUSPARSESetUseCPUSolve()`, `MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation`
M*/
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat, MatFactorType, Mat *);
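/* Register the CUSPARSE-based LU, Cholesky, ILU and ICC factorizations (plus the banded LU variant for MATSEQAIJ) with PETSc's solver-type registry */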
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
PetscFunctionBegin;
PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSEBAND, MATSEQAIJ, MAT_FACTOR_LU, MatGetFactor_seqaijcusparse_cusparse_band));
PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_LU, MatGetFactor_seqaijcusparse_cusparse));
PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_CHOLESKY, MatGetFactor_seqaijcusparse_cusparse));
PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ILU, MatGetFactor_seqaijcusparse_cusparse));
PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ICC, MatGetFactor_seqaijcusparse_cusparse));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatResetPreallocationCOO_SeqAIJCUSPARSE(Mat mat)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)mat->spptr;
PetscFunctionBegin;
if (!cusp) PetscFunctionReturn(PETSC_SUCCESS);
delete cusp->cooPerm;
delete cusp->cooPerm_a;
cusp->cooPerm = NULL;
cusp->cooPerm_a = NULL;
if (cusp->use_extended_coo) {
PetscCallCUDA(hipFree(cusp->jmap_d));
PetscCallCUDA(hipFree(cusp->perm_d));
}
cusp->use_extended_coo = PETSC_FALSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
PetscFunctionBegin;
if (*cusparsestruct) {
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat, (*cusparsestruct)->format));
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose, (*cusparsestruct)->format));
delete (*cusparsestruct)->workVector;
delete (*cusparsestruct)->rowoffsets_gpu;
delete (*cusparsestruct)->cooPerm;
delete (*cusparsestruct)->cooPerm_a;
delete (*cusparsestruct)->csr2csc_i;
if ((*cusparsestruct)->handle) PetscCallCUSPARSE(hipsparseDestroy((*cusparsestruct)->handle));
if ((*cusparsestruct)->jmap_d) PetscCallCUDA(hipFree((*cusparsestruct)->jmap_d));
if ((*cusparsestruct)->perm_d) PetscCallCUDA(hipFree((*cusparsestruct)->perm_d));
PetscCall(PetscFree(*cusparsestruct));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
PetscFunctionBegin;
if (*mat) {
delete (*mat)->values;
delete (*mat)->column_indices;
delete (*mat)->row_offsets;
delete *mat;
*mat = 0;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
PetscFunctionBegin;
if (*trifactor) {
if ((*trifactor)->descr) PetscCallCUSPARSE(hipsparseDestroyMatDescr((*trifactor)->descr));
if ((*trifactor)->solveInfo) PetscCallCUSPARSE(cusparseDestroyCsrsvInfo((*trifactor)->solveInfo));
PetscCall(CsrMatrix_Destroy(&(*trifactor)->csrMat));
if ((*trifactor)->solveBuffer) PetscCallCUDA(hipFree((*trifactor)->solveBuffer));
if ((*trifactor)->AA_h) PetscCallCUDA(hipHostFree((*trifactor)->AA_h));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
if ((*trifactor)->csr2cscBuffer) PetscCallCUDA(hipFree((*trifactor)->csr2cscBuffer));
#endif
PetscCall(PetscFree(*trifactor));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct, MatCUSPARSEStorageFormat format)
{
CsrMatrix *mat;
PetscFunctionBegin;
if (*matstruct) {
if ((*matstruct)->mat) {
if (format == MAT_CUSPARSE_ELL || format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
PetscCallCUSPARSE(cusparseDestroyHybMat(hybMat));
#endif
} else {
mat = (CsrMatrix *)(*matstruct)->mat;
PetscCall(CsrMatrix_Destroy(&mat));
}
}
if ((*matstruct)->descr) PetscCallCUSPARSE(hipsparseDestroyMatDescr((*matstruct)->descr));
delete (*matstruct)->cprowIndices;
if ((*matstruct)->alpha_one) PetscCallCUDA(hipFree((*matstruct)->alpha_one));
if ((*matstruct)->beta_zero) PetscCallCUDA(hipFree((*matstruct)->beta_zero));
if ((*matstruct)->beta_one) PetscCallCUDA(hipFree((*matstruct)->beta_one));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
if (mdata->matDescr) PetscCallCUSPARSE(hipsparseDestroySpMat(mdata->matDescr));
for (int i = 0; i < 3; i++) {
if (mdata->cuSpMV[i].initialized) {
PetscCallCUDA(hipFree(mdata->cuSpMV[i].spmvBuffer));
PetscCallCUSPARSE(hipsparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr));
PetscCallCUSPARSE(hipsparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr));
}
}
#endif
delete *matstruct;
*matstruct = NULL;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *trifactors)
{
Mat_SeqAIJCUSPARSETriFactors *fs = *trifactors;
PetscFunctionBegin;
if (fs) {
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtr));
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtr));
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtrTranspose));
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtrTranspose));
delete fs->workVector;
fs->workVector = NULL;
#endif
delete fs->rpermIndices;
delete fs->cpermIndices;
fs->rpermIndices = NULL;
fs->cpermIndices = NULL;
if (fs->a_band_d) PetscCallCUDA(hipFree(fs->a_band_d));
if (fs->i_band_d) PetscCallCUDA(hipFree(fs->i_band_d));
fs->init_dev_prop = PETSC_FALSE;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscCallCUDA(hipFree(fs->csrRowPtr));
PetscCallCUDA(hipFree(fs->csrColIdx));
PetscCallCUDA(hipFree(fs->csrRowPtr32));
PetscCallCUDA(hipFree(fs->csrColIdx32));
PetscCallCUDA(hipFree(fs->csrVal));
PetscCallCUDA(hipFree(fs->diag));
PetscCallCUDA(hipFree(fs->X));
PetscCallCUDA(hipFree(fs->Y));
// PetscCallCUDA(hipFree(fs->factBuffer_M)); /* Not needed since factBuffer_M is shared with one of spsvBuffer_L/U */
PetscCallCUDA(hipFree(fs->spsvBuffer_L));
PetscCallCUDA(hipFree(fs->spsvBuffer_U));
PetscCallCUDA(hipFree(fs->spsvBuffer_Lt));
PetscCallCUDA(hipFree(fs->spsvBuffer_Ut));
PetscCallCUSPARSE(hipsparseDestroyMatDescr(fs->matDescr_M));
PetscCallCUSPARSE(hipsparseDestroySpMat(fs->spMatDescr_L));
PetscCallCUSPARSE(hipsparseDestroySpMat(fs->spMatDescr_U));
PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_L));
PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Lt));
PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_U));
PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Ut));
PetscCallCUSPARSE(hipsparseDestroyDnVec(fs->dnVecDescr_X));
PetscCallCUSPARSE(hipsparseDestroyDnVec(fs->dnVecDescr_Y));
PetscCallCUSPARSE(hipsparseDestroyCsrilu02Info(fs->ilu0Info_M));
PetscCallCUSPARSE(hipsparseDestroyCsric02Info(fs->ic0Info_M));
PetscCall(PetscFree(fs->csrRowPtr_h));
PetscCall(PetscFree(fs->csrVal_h));
PetscCall(PetscFree(fs->diag_h));
fs->createdTransposeSpSVDescr = PETSC_FALSE;
fs->updatedTransposeSpSVAnalysis = PETSC_FALSE;
#endif
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **trifactors)
{
PetscFunctionBegin;
if (*trifactors) {
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(trifactors));
PetscCallCUSPARSE(hipsparseDestroy((*trifactors)->handle));
PetscCall(PetscFree(*trifactors));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
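/* Thrust functors used by the COO assembly routines below: IJCompare orders (i,j) pairs lexicographically, IJEqual tests
(i,j) equality, IJDiff maps a pair of equal values to 0 and unequal values to 1, and IJSum is a logical OR; together they
are used to detect where a new unique nonzero starts in the sorted COO input */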
struct IJCompare {
__host__ __device__ inline bool operator()(const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct IJEqual {
__host__ __device__ inline bool operator()(const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false;
return true;
}
};
struct IJDiff {
__host__ __device__ inline PetscInt operator()(const PetscInt &t1, const PetscInt &t2) { return t1 == t2 ? 0 : 1; }
};
struct IJSum {
__host__ __device__ inline PetscInt operator()(const PetscInt &t1, const PetscInt &t2) { return t1 || t2; }
};
#include <thrust/iterator/discard_iterator.h>
/* Associated with MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic() */
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE_Basic(Mat A, const PetscScalar v[], InsertMode imode)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
THRUSTARRAY *cooPerm_v = NULL;
thrust::device_ptr<const PetscScalar> d_v;
CsrMatrix *matrix;
PetscInt n;
PetscFunctionBegin;
PetscCheck(cusp, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUSPARSE struct");
PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUSPARSE CsrMatrix");
if (!cusp->cooPerm) {
PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
PetscFunctionReturn(PETSC_SUCCESS);
}
matrix = (CsrMatrix *)cusp->mat->mat;
PetscCheck(matrix->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
if (!v) {
if (imode == INSERT_VALUES) thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.);
goto finalize;
}
n = cusp->cooPerm->size();
if (isCudaMem(v)) {
d_v = thrust::device_pointer_cast(v);
} else {
cooPerm_v = new THRUSTARRAY(n);
cooPerm_v->assign(v, v + n);
d_v = cooPerm_v->data();
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscScalar)));
}
PetscCall(PetscLogGpuTimeBegin());
if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */
if (cusp->cooPerm_a) { /* there are repeated entries in d_v[], and we need to sum the repeats before adding them to the matrix values */
THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size());
auto vbit = thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin());
/* thrust::reduce_by_key(keys_first,keys_last,values_first,keys_output,values_output)
cooPerm_a = [0,0,1,2,3,4]. The length is n, the number of nonzeros in d_v[].
cooPerm_a is ordered. d_v[i] is the cooPerm_a[i]-th unique nonzero.
*/
thrust::reduce_by_key(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), vbit, thrust::make_discard_iterator(), cooPerm_w->begin(), thrust::equal_to<PetscInt>(), thrust::plus<PetscScalar>());
thrust::transform(cooPerm_w->begin(), cooPerm_w->end(), matrix->values->begin(), matrix->values->begin(), thrust::plus<PetscScalar>());
delete cooPerm_w;
} else {
/* all nonzeros in d_v[] are unique entries */
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()), matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->end()), matrix->values->end()));
thrust::for_each(zibit, zieit, VecCUDAPlusEquals()); /* values[i] += d_v[cooPerm[i]] */
}
} else {
if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */
auto vbit = thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin());
thrust::reduce_by_key(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), vbit, thrust::make_discard_iterator(), matrix->values->begin(), thrust::equal_to<PetscInt>(), thrust::plus<PetscScalar>());
} else {
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()), matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->end()), matrix->values->end()));
thrust::for_each(zibit, zieit, VecCUDAEquals());
}
}
PetscCall(PetscLogGpuTimeEnd());
finalize:
delete cooPerm_v;
A->offloadmask = PETSC_OFFLOAD_GPU;
PetscCall(PetscObjectStateIncrease((PetscObject)A));
/* shorter version of MatAssemblyEnd_SeqAIJ */
PetscCall(PetscInfo(A, "Matrix size: %" PetscInt_FMT " X %" PetscInt_FMT "; storage space: 0 unneeded,%" PetscInt_FMT " used\n", A->rmap->n, A->cmap->n, a->nz));
PetscCall(PetscInfo(A, "Number of mallocs during MatSetValues() is 0\n"));
PetscCall(PetscInfo(A, "Maximum nonzeros in any row is %" PetscInt_FMT "\n", a->rmax));
a->reallocs = 0;
A->info.mallocs += 0;
A->info.nz_unneeded = 0;
A->assembled = A->was_assembled = PETSC_TRUE;
A->num_ass++;
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscFunctionBegin;
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
if (!cusp) PetscFunctionReturn(PETSC_SUCCESS);
if (destroy) {
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose, cusp->format));
delete cusp->csr2csc_i;
cusp->csr2csc_i = NULL;
}
A->transupdated = PETSC_FALSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
#include <thrust/binary_search.h>
/* 'Basic' means it only works when coo_i[] and coo_j[] do not contain negative indices */
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic(Mat A, PetscCount n, PetscInt coo_i[], PetscInt coo_j[])
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscInt cooPerm_n, nzr = 0;
PetscFunctionBegin;
PetscCall(PetscLayoutSetUp(A->rmap));
PetscCall(PetscLayoutSetUp(A->cmap));
cooPerm_n = cusp->cooPerm ? cusp->cooPerm->size() : 0;
if (n != cooPerm_n) {
delete cusp->cooPerm;
delete cusp->cooPerm_a;
cusp->cooPerm = NULL;
cusp->cooPerm_a = NULL;
}
if (n) {
thrust::device_ptr<PetscInt> d_i, d_j;
PetscInt *d_raw_i, *d_raw_j;
PetscBool free_raw_i = PETSC_FALSE, free_raw_j = PETSC_FALSE;
PetscMemType imtype, jmtype;
PetscCall(PetscGetMemType(coo_i, &imtype));
if (PetscMemTypeHost(imtype)) {
PetscCallCUDA(hipMalloc(&d_raw_i, sizeof(PetscInt) * n));
PetscCallCUDA(hipMemcpy(d_raw_i, coo_i, sizeof(PetscInt) * n, hipMemcpyHostToDevice));
d_i = thrust::device_pointer_cast(d_raw_i);
free_raw_i = PETSC_TRUE;
PetscCall(PetscLogCpuToGpu(1. * n * sizeof(PetscInt)));
} else {
d_i = thrust::device_pointer_cast(coo_i);
}
PetscCall(PetscGetMemType(coo_j, &jmtype));
if (PetscMemTypeHost(jmtype)) { // MatSetPreallocationCOO_MPIAIJCUSPARSE_Basic() passes device coo_i[] and host coo_j[]!
PetscCallCUDA(hipMalloc(&d_raw_j, sizeof(PetscInt) * n));
PetscCallCUDA(hipMemcpy(d_raw_j, coo_j, sizeof(PetscInt) * n, hipMemcpyHostToDevice));
d_j = thrust::device_pointer_cast(d_raw_j);
free_raw_j = PETSC_TRUE;
PetscCall(PetscLogCpuToGpu(1. * n * sizeof(PetscInt)));
} else {
d_j = thrust::device_pointer_cast(coo_j);
}
THRUSTINTARRAY ii(A->rmap->n);
if (!cusp->cooPerm) cusp->cooPerm = new THRUSTINTARRAY(n);
if (!cusp->cooPerm_a) cusp->cooPerm_a = new THRUSTINTARRAY(n);
/* Ex.
n = 6
coo_i = [3,3,1,4,1,4]
coo_j = [3,2,2,5,2,6]
*/
auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i, d_j));
auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i + n, d_j + n));
PetscCall(PetscLogGpuTimeBegin());
thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0);
thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare()); /* sort by row, then by col */
(*cusp->cooPerm_a).assign(d_i, d_i + n); /* copy the sorted array */
THRUSTINTARRAY w(d_j, d_j + n);
/*
d_i = [1,1,3,3,4,4]
d_j = [2,2,2,3,5,6]
cooPerm = [2,4,1,0,3,5]
*/
auto nekey = thrust::unique(fkey, ekey, IJEqual()); /* unique (d_i, d_j) */
/*
d_i = [1,3,3,4,4,x]
^ekey
d_j = [2,2,3,5,6,x]
^nekey
*/
if (nekey == ekey) { /* all entries are unique */
delete cusp->cooPerm_a;
cusp->cooPerm_a = NULL;
} else { /* Stefano: I couldn't come up with a more elegant algorithm */
/* idea: any change in i or j in the (i,j) sequence implies a new nonzero */
adjacent_difference(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), cusp->cooPerm_a->begin(), IJDiff()); /* cooPerm_a: [1,1,3,3,4,4] => [1,0,1,0,1,0]*/
adjacent_difference(w.begin(), w.end(), w.begin(), IJDiff()); /* w: [2,2,2,3,5,6] => [2,0,0,1,1,1]*/
(*cusp->cooPerm_a)[0] = 0; /* clear the first entry, though accessing an entry on device implies a hipMemcpy */
w[0] = 0;
thrust::transform(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), w.begin(), cusp->cooPerm_a->begin(), IJSum()); /* cooPerm_a = [0,0,1,1,1,1]*/
thrust::inclusive_scan(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), cusp->cooPerm_a->begin(), thrust::plus<PetscInt>()); /*cooPerm_a=[0,0,1,2,3,4]*/
}
thrust::counting_iterator<PetscInt> search_begin(0);
thrust::upper_bound(d_i, nekey.get_iterator_tuple().get<0>(), /* binary search entries of [0,1,2,3,4,5,6) in ordered array d_i = [1,3,3,4,4], supposing A->rmap->n = 6. */
search_begin, search_begin + A->rmap->n, /* return in ii[] the index of last position in d_i[] where value could be inserted without violating the ordering */
ii.begin()); /* ii = [0,1,1,3,5,5]. A leading 0 will be added later */
PetscCall(PetscLogGpuTimeEnd());
PetscCall(MatSeqXAIJFreeAIJ(A, &a->a, &a->j, &a->i));
a->singlemalloc = PETSC_FALSE;
a->free_a = PETSC_TRUE;
a->free_ij = PETSC_TRUE;
PetscCall(PetscMalloc1(A->rmap->n + 1, &a->i));
a->i[0] = 0; /* a->i = [0,0,1,1,3,5,5] */
PetscCallCUDA(hipMemcpy(a->i + 1, ii.data().get(), A->rmap->n * sizeof(PetscInt), hipMemcpyDeviceToHost));
a->nz = a->maxnz = a->i[A->rmap->n];
a->rmax = 0;
PetscCall(PetscMalloc1(a->nz, &a->a));
PetscCall(PetscMalloc1(a->nz, &a->j));
PetscCallCUDA(hipMemcpy(a->j, thrust::raw_pointer_cast(d_j), a->nz * sizeof(PetscInt), hipMemcpyDeviceToHost));
if (!a->ilen) PetscCall(PetscMalloc1(A->rmap->n, &a->ilen));
if (!a->imax) PetscCall(PetscMalloc1(A->rmap->n, &a->imax));
for (PetscInt i = 0; i < A->rmap->n; i++) {
const PetscInt nnzr = a->i[i + 1] - a->i[i];
nzr += (PetscInt) !!(nnzr);
a->ilen[i] = a->imax[i] = nnzr;
a->rmax = PetscMax(a->rmax, nnzr);
}
a->nonzerorowcnt = nzr;
A->preallocated = PETSC_TRUE;
PetscCall(PetscLogGpuToCpu((A->rmap->n + a->nz) * sizeof(PetscInt)));
PetscCall(MatMarkDiagonal_SeqAIJ(A));
if (free_raw_i) PetscCallCUDA(hipFree(d_raw_i));
if (free_raw_j) PetscCallCUDA(hipFree(d_raw_j));
} else {
PetscCall(MatSeqAIJSetPreallocation(A, 0, NULL));
}
PetscCall(MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE));
/* We want to allocate the CUSPARSE struct for matvec now.
The code is so convoluted now that I prefer to copy zeros */
PetscCall(PetscArrayzero(a->a, a->nz));
PetscCall(MatCheckCompressedRow(A, nzr, &a->compressedrow, a->i, A->rmap->n, 0.6));
A->offloadmask = PETSC_OFFLOAD_CPU;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE));
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat mat, PetscCount coo_n, PetscInt coo_i[], PetscInt coo_j[])
{
Mat_SeqAIJ *seq;
Mat_SeqAIJCUSPARSE *dev;
PetscBool coo_basic = PETSC_TRUE;
PetscMemType mtype = PETSC_MEMTYPE_DEVICE;
PetscFunctionBegin;
PetscCall(MatResetPreallocationCOO_SeqAIJ(mat));
PetscCall(MatResetPreallocationCOO_SeqAIJCUSPARSE(mat));
if (coo_i) {
PetscCall(PetscGetMemType(coo_i, &mtype));
if (PetscMemTypeHost(mtype)) {
for (PetscCount k = 0; k < coo_n; k++) {
if (coo_i[k] < 0 || coo_j[k] < 0) {
coo_basic = PETSC_FALSE;
break;
}
}
}
}
if (coo_basic) { /* i,j are on device or do not contain negative indices */
PetscCall(MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic(mat, coo_n, coo_i, coo_j));
} else {
PetscCall(MatSetPreallocationCOO_SeqAIJ(mat, coo_n, coo_i, coo_j));
mat->offloadmask = PETSC_OFFLOAD_CPU;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(mat));
seq = static_cast<Mat_SeqAIJ *>(mat->data);
dev = static_cast<Mat_SeqAIJCUSPARSE *>(mat->spptr);
PetscCallCUDA(hipMalloc((void **)&dev->jmap_d, (seq->nz + 1) * sizeof(PetscCount)));
PetscCallCUDA(hipMemcpy(dev->jmap_d, seq->jmap, (seq->nz + 1) * sizeof(PetscCount), hipMemcpyHostToDevice));
PetscCallCUDA(hipMalloc((void **)&dev->perm_d, seq->Atot * sizeof(PetscCount)));
PetscCallCUDA(hipMemcpy(dev->perm_d, seq->perm, seq->Atot * sizeof(PetscCount), hipMemcpyHostToDevice));
dev->use_extended_coo = PETSC_TRUE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
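/* Device kernel for the extended COO path: for each nonzero i of the matrix, it sums the user-provided values kv[perm[k]]
for k in [jmap[i], jmap[i+1]) (i.e. all COO entries that map to that nonzero) and inserts or adds the result into a[i],
using a grid-stride loop over the nnz nonzeros */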
__global__ static void MatAddCOOValues(const PetscScalar kv[], PetscCount nnz, const PetscCount jmap[], const PetscCount perm[], InsertMode imode, PetscScalar a[])
{
PetscCount i = blockIdx.x * blockDim.x + threadIdx.x;
const PetscCount grid_size = gridDim.x * blockDim.x;
for (; i < nnz; i += grid_size) {
PetscScalar sum = 0.0;
for (PetscCount k = jmap[i]; k < jmap[i + 1]; k++) sum += kv[perm[k]];
a[i] = (imode == INSERT_VALUES ? 0.0 : a[i]) + sum;
}
}
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
Mat_SeqAIJ *seq = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSE *dev = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscCount Annz = seq->nz;
PetscMemType memtype;
const PetscScalar *v1 = v;
PetscScalar *Aa;
PetscFunctionBegin;
if (dev->use_extended_coo) {
PetscCall(PetscGetMemType(v, &memtype));
if (PetscMemTypeHost(memtype)) { /* If the user provided v[] on the host, we need to copy it to the device */
PetscCallCUDA(hipMalloc((void **)&v1, seq->coo_n * sizeof(PetscScalar)));
PetscCallCUDA(hipMemcpy((void *)v1, v, seq->coo_n * sizeof(PetscScalar), hipMemcpyHostToDevice));
}
if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSEGetArrayWrite(A, &Aa));
else PetscCall(MatSeqAIJCUSPARSEGetArray(A, &Aa));
if (Annz) {
hipLaunchKernelGGL(( MatAddCOOValues), dim3((Annz + 255) / 256), dim3(256), 0, 0, v1, Annz, dev->jmap_d, dev->perm_d, imode, Aa);
PetscCallCUDA(hipPeekAtLastError());
}
if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSERestoreArrayWrite(A, &Aa));
else PetscCall(MatSeqAIJCUSPARSERestoreArray(A, &Aa));
if (PetscMemTypeHost(memtype)) PetscCallCUDA(hipFree((void *)v1));
} else {
PetscCall(MatSetValuesCOO_SeqAIJCUSPARSE_Basic(A, v, imode));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSEGetIJ - returns the device row storage `i` and `j` indices for `MATSEQAIJCUSPARSE` matrices.
Not Collective
Input Parameters:
+ A - the matrix
- compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating the matrix data structure should be always returned in compressed form
Output Parameters:
+ i - the CSR row pointers
- j - the CSR column indices
Level: developer
Note:
When compressed is true, the CSR structure does not contain empty rows
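Example Usage:
A minimal sketch (assumes A is an assembled `MATSEQAIJCUSPARSE` matrix):
.vb
const int *i, *j;
PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &i, &j));
// i and j now point to device memory holding the CSR row offsets and column indices
PetscCall(MatSeqAIJCUSPARSERestoreIJ(A, PETSC_FALSE, &i, &j));
.ve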
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSERestoreIJ()`, `MatSeqAIJCUSPARSEGetArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetIJ(Mat A, PetscBool compressed, const int **i, const int **j)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *csr;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
if (!i || !j) PetscFunctionReturn(PETSC_SUCCESS);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix *)cusp->mat->mat;
if (i) {
if (!compressed && a->compressedrow.use) { /* need full row offset */
if (!cusp->rowoffsets_gpu) {
cusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
cusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1);
PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt)));
}
*i = cusp->rowoffsets_gpu->data().get();
} else *i = csr->row_offsets->data().get();
}
if (j) *j = csr->column_indices->data().get();
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSERestoreIJ - restore the device row storage `i` and `j` indices obtained with `MatSeqAIJCUSPARSEGetIJ()`
Not Collective
Input Parameters:
+ A - the matrix
. compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating whether the matrix data structure should always be returned in compressed form
. i - the CSR row pointers
- j - the CSR column indices
Level: developer
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetIJ()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreIJ(Mat A, PetscBool compressed, const int **i, const int **j)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
if (i) *i = NULL;
if (j) *j = NULL;
(void)compressed;
PetscFunctionReturn(PETSC_SUCCESS);
}
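/* Illustrative usage sketch (hypothetical caller code): inspect the device CSR index arrays of a
MATSEQAIJCUSPARSE matrix A with the accessors defined above. The returned pointers are device
pointers, suitable for kernels or sparse-library calls.
const int *di, *dj;
PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &di, &dj));
// ... use di/dj on the device ...
PetscCall(MatSeqAIJCUSPARSERestoreIJ(A, PETSC_FALSE, &di, &dj));
*/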
/*@C
MatSeqAIJCUSPARSEGetArrayRead - gives read-only access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored
Not Collective
Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Note:
May trigger host-device copies if up-to-date matrix data is on host
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar **a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *csr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix *)cusp->mat->mat;
PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
*a = csr->values->data().get();
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSERestoreArrayRead - restore the read-only access array obtained from `MatSeqAIJCUSPARSEGetArrayRead()`
Not Collective
Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data
Level: developer
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar **a)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
*a = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSEGetArray - gives read-write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored
Not Collective
Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Note:
May trigger host-device copies if up-to-date matrix data is on host
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArray()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar **a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *csr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix *)cusp->mat->mat;
PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSERestoreArray - restore the read-write access array obtained from `MatSeqAIJCUSPARSEGetArray()`
Not Collective
Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data
Level: developer
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCall(MatSeqAIJInvalidateDiagonal(A));
PetscCall(PetscObjectStateIncrease((PetscObject)A));
*a = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSEGetArrayWrite - gives write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored
Not Collective
Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Note:
Does not trigger host-device copies and flags data validity on the GPU
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSERestoreArrayWrite()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar **a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *csr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix *)cusp->mat->mat;
PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSERestoreArrayWrite - restore the write-only access array obtained from `MatSeqAIJCUSPARSEGetArrayWrite()`
Not Collective
Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data
Level: developer
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayWrite()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCall(MatSeqAIJInvalidateDiagonal(A));
PetscCall(PetscObjectStateIncrease((PetscObject)A));
*a = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
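/* Illustrative usage sketch (hypothetical caller code) for the array accessors above: the read
variant leaves the offload state untouched, while the read-write and write-only variants mark the
GPU copy as authoritative and invalidate any cached transpose.
PetscScalar *aa;
const PetscScalar *ra;
PetscCall(MatSeqAIJCUSPARSEGetArray(A, &aa)); // read-write device pointer
// ... launch a kernel that updates aa in place ...
PetscCall(MatSeqAIJCUSPARSERestoreArray(A, &aa));
PetscCall(MatSeqAIJCUSPARSEGetArrayRead(A, &ra)); // read-only device pointer
// ... consume ra ...
PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(A, &ra));
*/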
struct IJCompare4 {
__host__ __device__ inline bool operator()(const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct Shift {
int _shift;
Shift(int shift) : _shift(shift) { }
__host__ __device__ inline int operator()(const int &c) { return c + _shift; }
};
/* merges two SeqAIJCUSPARSE matrices A, B by concatenating their rows. [A';B']' operation in matlab notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A, Mat B, MatReuse reuse, Mat *C)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data, *b = (Mat_SeqAIJ *)B->data, *c;
Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr, *Ccusp;
Mat_SeqAIJCUSPARSEMultStruct *Cmat;
CsrMatrix *Acsr, *Bcsr, *Ccsr;
PetscInt Annz, Bnnz;
hipsparseStatus_t stat;
PetscInt i, m, n, zero = 0;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidHeaderSpecific(B, MAT_CLASSID, 2);
PetscValidPointer(C, 4);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCheckTypeName(B, MATSEQAIJCUSPARSE);
PetscCheck(A->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number of rows %" PetscInt_FMT " != %" PetscInt_FMT, A->rmap->n, B->rmap->n);
PetscCheck(reuse != MAT_INPLACE_MATRIX, PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_INPLACE_MATRIX not supported");
PetscCheck(Acusp->format != MAT_CUSPARSE_ELL && Acusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCheck(Bcusp->format != MAT_CUSPARSE_ELL && Bcusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
if (reuse == MAT_INITIAL_MATRIX) {
m = A->rmap->n;
n = A->cmap->n + B->cmap->n;
PetscCall(MatCreate(PETSC_COMM_SELF, C));
PetscCall(MatSetSizes(*C, m, n, m, n));
PetscCall(MatSetType(*C, MATSEQAIJCUSPARSE));
c = (Mat_SeqAIJ *)(*C)->data;
Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
Cmat->cprowIndices = NULL;
c->compressedrow.use = PETSC_FALSE;
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Ccusp->nrows = m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = m;
Ccsr->num_cols = n;
PetscCallCUSPARSE(hipsparseCreateMatDescr(&Cmat->descr));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
PetscCallCUDA(hipMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar)));
PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar)));
PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar)));
PetscCallCUDA(hipMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));
PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix *)Acusp->mat->mat;
Bcsr = (CsrMatrix *)Bcusp->mat->mat;
Annz = (PetscInt)Acsr->column_indices->size();
Bnnz = (PetscInt)Bcsr->column_indices->size();
c->nz = Annz + Bnnz;
Ccsr->row_offsets = new THRUSTINTARRAY32(m + 1);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
Ccsr->num_entries = c->nz;
Ccusp->cooPerm = new THRUSTINTARRAY(c->nz);
if (c->nz) {
auto Acoo = new THRUSTINTARRAY32(Annz);
auto Bcoo = new THRUSTINTARRAY32(Bnnz);
auto Ccoo = new THRUSTINTARRAY32(c->nz);
THRUSTINTARRAY32 *Aroff, *Broff;
if (a->compressedrow.use) { /* need full row offset */
if (!Acusp->rowoffsets_gpu) {
Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
Acusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1);
PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt)));
}
Aroff = Acusp->rowoffsets_gpu;
} else Aroff = Acsr->row_offsets;
if (b->compressedrow.use) { /* need full row offset */
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1);
PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt)));
}
Broff = Bcusp->rowoffsets_gpu;
} else Broff = Bcsr->row_offsets;
PetscCall(PetscLogGpuTimeBegin());
stat = hipsparseXcsr2coo(Acusp->handle, Aroff->data().get(), Annz, m, Acoo->data().get(), HIPSPARSE_INDEX_BASE_ZERO);
PetscCallCUSPARSE(stat);
stat = hipsparseXcsr2coo(Bcusp->handle, Broff->data().get(), Bnnz, m, Bcoo->data().get(), HIPSPARSE_INDEX_BASE_ZERO);
PetscCallCUSPARSE(stat);
/* Issues when using bool with large matrices on SUMMIT 10.2.89 */
auto Aperm = thrust::make_constant_iterator(1);
auto Bperm = thrust::make_constant_iterator(0);
#if PETSC_PKG_CUDA_VERSION_GE(10, 0, 0)
auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(), Shift(A->cmap->n));
auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(), Shift(A->cmap->n));
#else
/* there are issues instantiating the merge operation using a transform iterator for the columns of B */
auto Bcib = Bcsr->column_indices->begin();
auto Bcie = Bcsr->column_indices->end();
thrust::transform(Bcib, Bcie, Bcib, Shift(A->cmap->n));
#endif
auto wPerm = new THRUSTINTARRAY32(Annz + Bnnz);
auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(), Acsr->column_indices->begin(), Acsr->values->begin(), Aperm));
auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(), Acsr->column_indices->end(), Acsr->values->end(), Aperm));
auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(), Bcib, Bcsr->values->begin(), Bperm));
auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(), Bcie, Bcsr->values->end(), Bperm));
auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(), Ccsr->column_indices->begin(), Ccsr->values->begin(), wPerm->begin()));
auto p1 = Ccusp->cooPerm->begin();
auto p2 = Ccusp->cooPerm->begin();
thrust::advance(p2, Annz);
PetscCallThrust(thrust::merge(thrust::device, Azb, Aze, Bzb, Bze, Czb, IJCompare4()));
#if PETSC_PKG_CUDA_VERSION_LT(10, 0, 0)
thrust::transform(Bcib, Bcie, Bcib, Shift(-A->cmap->n));
#endif
auto cci = thrust::make_counting_iterator(zero);
auto cce = thrust::make_counting_iterator(c->nz);
#if 0 //Errors on SUMMIT cuda 11.1.0
PetscCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>()));
#else
auto pred = thrust::identity<int>();
PetscCallThrust(thrust::copy_if(thrust::device, cci, cce, wPerm->begin(), p1, pred));
PetscCallThrust(thrust::remove_copy_if(thrust::device, cci, cce, wPerm->begin(), p2, pred));
#endif
stat = hipsparseXcoo2csr(Ccusp->handle, Ccoo->data().get(), c->nz, m, Ccsr->row_offsets->data().get(), HIPSPARSE_INDEX_BASE_ZERO);
PetscCallCUSPARSE(stat);
PetscCall(PetscLogGpuTimeEnd());
delete wPerm;
delete Acoo;
delete Bcoo;
delete Ccoo;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
#endif
if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */
PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B));
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct;
CsrMatrix *CcsrT = new CsrMatrix;
CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix *)Bcusp->matTranspose->mat : NULL;
(*C)->form_explicit_transpose = PETSC_TRUE;
(*C)->transupdated = PETSC_TRUE;
Ccusp->rowoffsets_gpu = NULL;
CmatT->cprowIndices = NULL;
CmatT->mat = CcsrT;
CcsrT->num_rows = n;
CcsrT->num_cols = m;
CcsrT->num_entries = c->nz;
CcsrT->row_offsets = new THRUSTINTARRAY32(n + 1);
CcsrT->column_indices = new THRUSTINTARRAY32(c->nz);
CcsrT->values = new THRUSTARRAY(c->nz);
PetscCall(PetscLogGpuTimeBegin());
auto rT = CcsrT->row_offsets->begin();
if (AT) {
rT = thrust::copy(AcsrT->row_offsets->begin(), AcsrT->row_offsets->end(), rT);
thrust::advance(rT, -1);
}
if (BT) {
auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(), Shift(a->nz));
auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(), Shift(a->nz));
thrust::copy(titb, tite, rT);
}
auto cT = CcsrT->column_indices->begin();
if (AT) cT = thrust::copy(AcsrT->column_indices->begin(), AcsrT->column_indices->end(), cT);
if (BT) thrust::copy(BcsrT->column_indices->begin(), BcsrT->column_indices->end(), cT);
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT);
if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT);
PetscCall(PetscLogGpuTimeEnd());
PetscCallCUSPARSE(hipsparseCreateMatDescr(&CmatT->descr));
PetscCallCUSPARSE(hipsparseSetMatIndexBase(CmatT->descr, HIPSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(hipsparseSetMatType(CmatT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
PetscCallCUDA(hipMalloc((void **)&(CmatT->alpha_one), sizeof(PetscScalar)));
PetscCallCUDA(hipMalloc((void **)&(CmatT->beta_zero), sizeof(PetscScalar)));
PetscCallCUDA(hipMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar)));
PetscCallCUDA(hipMemcpy(CmatT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(CmatT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice));
PetscCallCUDA(hipMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
stat = hipsparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries, CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
#endif
Ccusp->matTranspose = CmatT;
}
}
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
PetscCall(PetscMalloc1(m + 1, &c->i));
PetscCall(PetscMalloc1(c->nz, &c->j));
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
PetscCallCUDA(hipMemcpy(c->i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
PetscCallCUDA(hipMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
} else {
PetscCallCUDA(hipMemcpy(c->i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
PetscCallCUDA(hipMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
}
PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt)));
PetscCall(PetscMalloc1(m, &c->ilen));
PetscCall(PetscMalloc1(m, &c->imax));
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (i = 0; i < m; i++) {
const PetscInt nn = c->i[i + 1] - c->i[i];
c->ilen[i] = c->imax[i] = nn;
c->nonzerorowcnt += (PetscInt) !!nn;
c->rmax = PetscMax(c->rmax, nn);
}
PetscCall(MatMarkDiagonal_SeqAIJ(*C));
PetscCall(PetscMalloc1(c->nz, &c->a));
(*C)->nonzerostate++;
PetscCall(PetscLayoutSetUp((*C)->rmap));
PetscCall(PetscLayoutSetUp((*C)->cmap));
Ccusp->nonzerostate = (*C)->nonzerostate;
(*C)->preallocated = PETSC_TRUE;
} else {
PetscCheck((*C)->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number of rows %" PetscInt_FMT " != %" PetscInt_FMT, (*C)->rmap->n, B->rmap->n);
c = (Mat_SeqAIJ *)(*C)->data;
if (c->nz) {
Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr;
PetscCheck(Ccusp->cooPerm, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cooPerm");
PetscCheck(Ccusp->format != MAT_CUSPARSE_ELL && Ccusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCheck(Ccusp->nonzerostate == (*C)->nonzerostate, PETSC_COMM_SELF, PETSC_ERR_COR, "Wrong nonzerostate");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));
PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix *)Acusp->mat->mat;
Bcsr = (CsrMatrix *)Bcusp->mat->mat;
Ccsr = (CsrMatrix *)Ccusp->mat->mat;
PetscCheck(Acsr->num_entries == (PetscInt)Acsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "A nnz %" PetscInt_FMT " != %" PetscInt_FMT, Acsr->num_entries, (PetscInt)Acsr->values->size());
PetscCheck(Bcsr->num_entries == (PetscInt)Bcsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "B nnz %" PetscInt_FMT " != %" PetscInt_FMT, Bcsr->num_entries, (PetscInt)Bcsr->values->size());
PetscCheck(Ccsr->num_entries == (PetscInt)Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT, Ccsr->num_entries, (PetscInt)Ccsr->values->size());
PetscCheck(Ccsr->num_entries == Acsr->num_entries + Bcsr->num_entries, PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT " + %" PetscInt_FMT, Ccsr->num_entries, Acsr->num_entries, Bcsr->num_entries);
PetscCheck(Ccusp->cooPerm->size() == Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "permSize %" PetscInt_FMT " != %" PetscInt_FMT, (PetscInt)Ccusp->cooPerm->size(), (PetscInt)Ccsr->values->size());
auto pmid = Ccusp->cooPerm->begin();
thrust::advance(pmid, Acsr->num_entries);
PetscCall(PetscLogGpuTimeBegin());
auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->cooPerm->begin())));
auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid)));
thrust::for_each(zibait, zieait, VecCUDAEquals());
auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid)));
auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->cooPerm->end())));
thrust::for_each(zibbit, ziebit, VecCUDAEquals());
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(*C, PETSC_FALSE));
if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) {
PetscCheck(Ccusp->matTranspose, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing transpose Mat_SeqAIJCUSPARSEMultStruct");
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix *)Bcusp->matTranspose->mat : NULL;
CsrMatrix *CcsrT = (CsrMatrix *)Ccusp->matTranspose->mat;
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT);
if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT);
(*C)->transupdated = PETSC_TRUE;
}
PetscCall(PetscLogGpuTimeEnd());
}
}
PetscCall(PetscObjectStateIncrease((PetscObject)*C));
(*C)->assembled = PETSC_TRUE;
(*C)->was_assembled = PETSC_FALSE;
(*C)->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(PETSC_SUCCESS);
}
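/* Illustrative usage sketch (hypothetical caller code): merge two MATSEQAIJCUSPARSE matrices with
the routine above, then refresh only the numerical values on a second call.
Mat C;
PetscCall(MatSeqAIJCUSPARSEMergeMats(A, B, MAT_INITIAL_MATRIX, &C)); // C has the columns of A followed by those of B
// ... change the values (not the nonzero pattern) of A and/or B ...
PetscCall(MatSeqAIJCUSPARSEMergeMats(A, B, MAT_REUSE_MATRIX, &C)); // values-only update
PetscCall(MatDestroy(&C));
*/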
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
{
bool dmem;
const PetscScalar *av;
PetscFunctionBegin;
dmem = isCudaMem(v);
PetscCall(MatSeqAIJCUSPARSEGetArrayRead(A, &av));
if (n && idx) {
THRUSTINTARRAY widx(n);
widx.assign(idx, idx + n);
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
THRUSTARRAY *w = NULL;
thrust::device_ptr<PetscScalar> dv;
if (dmem) {
dv = thrust::device_pointer_cast(v);
} else {
w = new THRUSTARRAY(n);
dv = w->data();
}
thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av);
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.begin()), dv));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.end()), dv + n));
thrust::for_each(zibit, zieit, VecCUDAEquals());
if (w) PetscCallCUDA(hipMemcpy(v, w->data().get(), n * sizeof(PetscScalar), hipMemcpyDeviceToHost));
delete w;
} else {
PetscCallCUDA(hipMemcpy(v, av, n * sizeof(PetscScalar), dmem ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost));
}
if (!dmem) PetscCall(PetscLogCpuToGpu(n * sizeof(PetscScalar)));
PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(A, &av));
PetscFunctionReturn(PETSC_SUCCESS);
}
|
1d279a888d3171ce13f494f2cb6a4726f5499c17.cu
|
/*
Defines the basic matrix operations for the AIJ (compressed row)
matrix storage format using the CUSPARSE library.
*/
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <petsc/private/vecimpl.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
#include <thrust/adjacent_difference.h>
#if PETSC_CPP_VERSION >= 14
#define PETSC_HAVE_THRUST_ASYNC 1
// thrust::for_each(thrust::cuda::par.on()) requires C++14
#include <thrust/async/for_each.h>
#endif
#include <thrust/iterator/constant_iterator.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
const char *const MatCUSPARSEStorageFormats[] = {"CSR", "ELL", "HYB", "MatCUSPARSEStorageFormat", "MAT_CUSPARSE_", 0};
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
/* The following are copied from cusparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in
0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them.
typedef enum {
CUSPARSE_MV_ALG_DEFAULT = 0,
CUSPARSE_COOMV_ALG = 1,
CUSPARSE_CSRMV_ALG1 = 2,
CUSPARSE_CSRMV_ALG2 = 3
} cusparseSpMVAlg_t;
typedef enum {
CUSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0,
CUSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG1) = 1,
CUSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG2) = 2,
CUSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3,
CUSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_CSR_ALG1) = 4,
CUSPARSE_SPMM_ALG_DEFAULT = 0,
CUSPARSE_SPMM_COO_ALG1 = 1,
CUSPARSE_SPMM_COO_ALG2 = 2,
CUSPARSE_SPMM_COO_ALG3 = 3,
CUSPARSE_SPMM_COO_ALG4 = 5,
CUSPARSE_SPMM_CSR_ALG1 = 4,
CUSPARSE_SPMM_CSR_ALG2 = 6,
} cusparseSpMMAlg_t;
typedef enum {
CUSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministic
CUSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministic
} cusparseCsr2CscAlg_t;
*/
const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT", "COOMV_ALG", "CSRMV_ALG1", "CSRMV_ALG2", "cusparseSpMVAlg_t", "CUSPARSE_", 0};
const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT", "COO_ALG1", "COO_ALG2", "COO_ALG3", "CSR_ALG1", "COO_ALG4", "CSR_ALG2", "cusparseSpMMAlg_t", "CUSPARSE_SPMM_", 0};
const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID" /*cusparse does not have enum 0! We created one*/, "ALG1", "ALG2", "cusparseCsr2CscAlg_t", "CUSPARSE_CSR2CSC_", 0};
#endif
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *);
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *);
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat, Mat, const MatFactorInfo *);
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, IS, const MatFactorInfo *);
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat, Vec, Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **);
#endif
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat, PetscOptionItems *PetscOptionsObject);
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat, PetscScalar, Mat, MatStructure);
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat, PetscScalar);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat, Vec, Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec);
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec);
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec);
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec, PetscBool, PetscBool);
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **, MatCUSPARSEStorageFormat);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **);
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **);
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat, PetscBool);
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat, PetscInt, const PetscInt[], PetscScalar[]);
static PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat, PetscCount, PetscInt[], PetscInt[]);
static PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat, const PetscScalar[], InsertMode);
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSPARSE_MULT:
cusparsestruct->format = format;
break;
case MAT_CUSPARSE_ALL:
cusparsestruct->format = format;
break;
default:
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.", op);
}
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
MatCUSPARSESetFormat - Sets the storage format of `MATSEQAIJCUSPARSE` matrices for a particular
operation. Only the `MatMult()` operation can use different GPU storage formats.
Not Collective
Input Parameters:
+ A - Matrix of type `MATSEQAIJCUSPARSE`
. op - `MatCUSPARSEFormatOperation`. `MATSEQAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT` and `MAT_CUSPARSE_ALL`.
`MATMPIAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT_DIAG`, `MAT_CUSPARSE_MULT_OFFDIAG`, and `MAT_CUSPARSE_ALL`.
- format - `MatCUSPARSEStorageFormat` (one of `MAT_CUSPARSE_CSR`, `MAT_CUSPARSE_ELL`, `MAT_CUSPARSE_HYB`).
Level: intermediate
.seealso: [](chapter_matrices), `Mat`, `Mat`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation`
@*/
PetscErrorCode MatCUSPARSESetFormat(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscTryMethod(A, "MatCUSPARSESetFormat_C", (Mat, MatCUSPARSEFormatOperation, MatCUSPARSEStorageFormat), (A, op, format));
PetscFunctionReturn(PETSC_SUCCESS);
}
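/* Illustrative usage sketch (hypothetical caller code): select the ELL storage format for the
MatMult() path of a MATSEQAIJCUSPARSE matrix A; the same choice can be made at run time via the
-mat_cusparse_mult_storage_format option parsed further below.
PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_MULT, MAT_CUSPARSE_ELL));
*/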
PETSC_INTERN PetscErrorCode MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE(Mat A, PetscBool use_cpu)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscFunctionBegin;
cusparsestruct->use_cpu_solve = use_cpu;
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
MatCUSPARSESetUseCPUSolve - Sets whether to use the CPU `MatSolve()`.
Input Parameters:
+ A - Matrix of type `MATSEQAIJCUSPARSE`
- use_cpu - set flag for using the built-in CPU `MatSolve()`
Level: intermediate
Note:
The cuSparse LU solver currently computes the factors with the built-in CPU method
and moves the factors to the GPU for the solve. We have observed better performance keeping the data on the CPU and computing the solve there.
Use this method to specify whether the solve is done on the CPU or the GPU (GPU is the default).
.seealso: [](chapter_matrices), `Mat`, `MatSolve()`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation`
@*/
PetscErrorCode MatCUSPARSESetUseCPUSolve(Mat A, PetscBool use_cpu)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscTryMethod(A, "MatCUSPARSESetUseCPUSolve_C", (Mat, PetscBool), (A, use_cpu));
PetscFunctionReturn(PETSC_SUCCESS);
}
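/* Illustrative usage sketch (hypothetical caller code): keep the triangular solves of an (I)LU
factorization on the CPU, as suggested in the note above.
PetscCall(MatCUSPARSESetUseCPUSolve(A, PETSC_TRUE));
*/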
PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A, MatOption op, PetscBool flg)
{
PetscFunctionBegin;
switch (op) {
case MAT_FORM_EXPLICIT_TRANSPOSE:
/* need to destroy the transpose matrix if present to prevent logic errors if flg is set to true later */
if (A->form_explicit_transpose && !flg) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE));
A->form_explicit_transpose = flg;
break;
default:
PetscCall(MatSetOption_SeqAIJ(A, op, flg));
break;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
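/* Illustrative usage sketch (hypothetical caller code): ask for an explicit transpose to be formed
and cached on the GPU, which the handler above invalidates when the option is later unset.
PetscCall(MatSetOption(A, MAT_FORM_EXPLICIT_TRANSPOSE, PETSC_TRUE));
*/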
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat A, PetscOptionItems *PetscOptionsObject)
{
MatCUSPARSEStorageFormat format;
PetscBool flg;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscFunctionBegin;
PetscOptionsHeadBegin(PetscOptionsObject, "SeqAIJCUSPARSE options");
if (A->factortype == MAT_FACTOR_NONE) {
PetscCall(PetscOptionsEnum("-mat_cusparse_mult_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg));
if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_MULT, format));
PetscCall(PetscOptionsEnum("-mat_cusparse_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg));
if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_ALL, format));
PetscCall(PetscOptionsBool("-mat_cusparse_use_cpu_solve", "Use CPU (I)LU solve", "MatCUSPARSESetUseCPUSolve", cusparsestruct->use_cpu_solve, &cusparsestruct->use_cpu_solve, &flg));
if (flg) PetscCall(MatCUSPARSESetUseCPUSolve(A, cusparsestruct->use_cpu_solve));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCall(PetscOptionsEnum("-mat_cusparse_spmv_alg", "sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)", "cusparseSpMVAlg_t", MatCUSPARSESpMVAlgorithms, (PetscEnum)cusparsestruct->spmvAlg, (PetscEnum *)&cusparsestruct->spmvAlg, &flg));
/* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscCheck(!flg || CUSPARSE_SPMV_CSR_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
#else
PetscCheck(!flg || CUSPARSE_CSRMV_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
#endif
PetscCall(PetscOptionsEnum("-mat_cusparse_spmm_alg", "sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)", "cusparseSpMMAlg_t", MatCUSPARSESpMMAlgorithms, (PetscEnum)cusparsestruct->spmmAlg, (PetscEnum *)&cusparsestruct->spmmAlg, &flg));
PetscCheck(!flg || CUSPARSE_SPMM_CSR_ALG1 == 4, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum cusparseSpMMAlg_t has been changed but PETSc has not been updated accordingly");
PetscCall(
PetscOptionsEnum("-mat_cusparse_csr2csc_alg", "sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices", "cusparseCsr2CscAlg_t", MatCUSPARSECsr2CscAlgorithms, (PetscEnum)cusparsestruct->csr2cscAlg, (PetscEnum *)&cusparsestruct->csr2cscAlg, &flg));
PetscCheck(!flg || CUSPARSE_CSR2CSC_ALG1 == 1, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum cusparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly");
#endif
}
PetscOptionsHeadEnd();
PetscFunctionReturn(PETSC_SUCCESS);
}
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
static PetscErrorCode MatSeqAIJCUSPARSEBuildFactoredMatrix_LU(Mat A)
{
Mat_SeqAIJ *a = static_cast<Mat_SeqAIJ *>(A->data);
PetscInt m = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
const PetscInt *Ai = a->i, *Aj = a->j, *Adiag = a->diag;
const MatScalar *Aa = a->a;
PetscInt *Mi, *Mj, Mnz;
PetscScalar *Ma;
PetscFunctionBegin;
if (A->offloadmask == PETSC_OFFLOAD_CPU) { // A's latest factors are on CPU
if (!fs->csrRowPtr) { // Is it the first time doing the setup? Use csrRowPtr since it is not NULL even when m == 0
// Re-arrange the (skewed) factored matrix and put the result into M, a regular csr matrix on host
Mnz = (Ai[m] - Ai[0]) + (Adiag[0] - Adiag[m]); // Lnz (without the unit diagonal) + Unz (with the non-unit diagonal)
PetscCall(PetscMalloc1(m + 1, &Mi));
PetscCall(PetscMalloc1(Mnz, &Mj)); // Mj is temp
PetscCall(PetscMalloc1(Mnz, &Ma));
Mi[0] = 0;
for (PetscInt i = 0; i < m; i++) {
PetscInt llen = Ai[i + 1] - Ai[i];
PetscInt ulen = Adiag[i] - Adiag[i + 1];
PetscCall(PetscArraycpy(Mj + Mi[i], Aj + Ai[i], llen)); // entries of L
Mj[Mi[i] + llen] = i; // diagonal entry
PetscCall(PetscArraycpy(Mj + Mi[i] + llen + 1, Aj + Adiag[i + 1] + 1, ulen - 1)); // entries of U on the right of the diagonal
Mi[i + 1] = Mi[i] + llen + ulen;
}
// Copy M (L,U) from host to device
PetscCallCUDA(cudaMalloc(&fs->csrRowPtr, sizeof(*(fs->csrRowPtr)) * (m + 1)));
PetscCallCUDA(cudaMalloc(&fs->csrColIdx, sizeof(*(fs->csrColIdx)) * Mnz));
PetscCallCUDA(cudaMalloc(&fs->csrVal, sizeof(*(fs->csrVal)) * Mnz));
PetscCallCUDA(cudaMemcpy(fs->csrRowPtr, Mi, sizeof(*(fs->csrRowPtr)) * (m + 1), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(fs->csrColIdx, Mj, sizeof(*(fs->csrColIdx)) * Mnz, cudaMemcpyHostToDevice));
// Create descriptors for L, U. See https://docs.nvidia.com/cuda/cusparse/index.html#cusparseDiagType_t
// cusparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
// assumed to be present, but if CUSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
// all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
// assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
cusparseFillMode_t fillMode = CUSPARSE_FILL_MODE_LOWER;
cusparseDiagType_t diagType = CUSPARSE_DIAG_TYPE_UNIT;
const cusparseIndexType_t indexType = PetscDefined(USE_64BIT_INDICES) ? CUSPARSE_INDEX_64I : CUSPARSE_INDEX_32I;
PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_L, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
fillMode = CUSPARSE_FILL_MODE_UPPER;
diagType = CUSPARSE_DIAG_TYPE_NON_UNIT;
PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_U, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
// Allocate work vectors in SpSv
PetscCallCUDA(cudaMalloc((void **)&fs->X, sizeof(*(fs->X)) * m));
PetscCallCUDA(cudaMalloc((void **)&fs->Y, sizeof(*(fs->Y)) * m));
PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));
// Query buffer sizes for SpSV and then allocate buffers, temporarily assuming opA = CUSPARSE_OPERATION_NON_TRANSPOSE
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U));
PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U));
PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L));
// Record for reuse
fs->csrRowPtr_h = Mi;
fs->csrVal_h = Ma;
PetscCall(PetscFree(Mj));
}
// Copy the value
Mi = fs->csrRowPtr_h;
Ma = fs->csrVal_h;
Mnz = Mi[m];
for (PetscInt i = 0; i < m; i++) {
PetscInt llen = Ai[i + 1] - Ai[i];
PetscInt ulen = Adiag[i] - Adiag[i + 1];
PetscCall(PetscArraycpy(Ma + Mi[i], Aa + Ai[i], llen)); // entries of L
Ma[Mi[i] + llen] = (MatScalar)1.0 / Aa[Adiag[i]]; // recover the diagonal entry
PetscCall(PetscArraycpy(Ma + Mi[i] + llen + 1, Aa + Adiag[i + 1] + 1, ulen - 1)); // entries of U on the right of the diagonal
}
PetscCallCUDA(cudaMemcpy(fs->csrVal, Ma, sizeof(*Ma) * Mnz, cudaMemcpyHostToDevice));
// Do cusparseSpSV_analysis(), which is numeric and requires valid and up-to-date matrix values
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L));
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U));
// L, U values have changed, reset the flag to indicate we need to redo cusparseSpSV_analysis() for transpose solve
fs->updatedTransposeSpSVAnalysis = PETSC_FALSE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#else
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
const PetscInt *ai = a->i, *aj = a->j, *vi;
const MatScalar *aa = a->a, *v;
PetscInt *AiLo, *AjLo;
PetscInt i, nz, nzLower, offset, rowOffset;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(PETSC_SUCCESS);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
nzLower = n + ai[n] - ai[1];
if (!loTriFactor) {
PetscScalar *AALo;
PetscCallCUDA(cudaMallocHost((void **)&AALo, nzLower * sizeof(PetscScalar)));
/* Allocate Space for the lower triangular matrix */
PetscCallCUDA(cudaMallocHost((void **)&AiLo, (n + 1) * sizeof(PetscInt)));
PetscCallCUDA(cudaMallocHost((void **)&AjLo, nzLower * sizeof(PetscInt)));
/* Fill the lower triangular matrix */
AiLo[0] = (PetscInt)0;
AiLo[n] = nzLower;
AjLo[0] = (PetscInt)0;
AALo[0] = (MatScalar)1.0;
v = aa;
vi = aj;
offset = 1;
rowOffset = 1;
for (i = 1; i < n; i++) {
nz = ai[i + 1] - ai[i];
/* additional 1 for the term on the diagonal */
AiLo[i] = rowOffset;
rowOffset += nz + 1;
PetscCall(PetscArraycpy(&(AjLo[offset]), vi, nz));
PetscCall(PetscArraycpy(&(AALo[offset]), v, nz));
offset += nz;
AjLo[offset] = (PetscInt)i;
AALo[offset] = (MatScalar)1.0;
offset += 1;
v += nz;
vi += nz;
}
/* allocate space for the triangular factor information */
PetscCall(PetscNew(&loTriFactor));
loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
PetscCallCUSPARSE(cusparseCreateMatDescr(&loTriFactor->descr));
PetscCallCUSPARSE(cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
#else
PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
PetscCallCUSPARSE(cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER));
PetscCallCUSPARSE(cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT));
/* set the operation */
loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = n;
loTriFactor->csrMat->num_cols = n;
loTriFactor->csrMat->num_entries = nzLower;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1);
loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo + n + 1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
loTriFactor->csrMat->column_indices->assign(AjLo, AjLo + nzLower);
loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
loTriFactor->csrMat->values->assign(AALo, AALo + nzLower);
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize));
PetscCallCUDA(cudaMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize));
#endif
/* perform the solve analysis */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor;
loTriFactor->AA_h = AALo;
PetscCallCUDA(cudaFreeHost(AiLo));
PetscCallCUDA(cudaFreeHost(AjLo));
PetscCall(PetscLogCpuToGpu((n + 1 + nzLower) * sizeof(int) + nzLower * sizeof(PetscScalar)));
} else { /* update values only */
if (!loTriFactor->AA_h) PetscCallCUDA(cudaMallocHost((void **)&loTriFactor->AA_h, nzLower * sizeof(PetscScalar)));
/* Fill the lower triangular matrix */
loTriFactor->AA_h[0] = 1.0;
v = aa;
vi = aj;
offset = 1;
for (i = 1; i < n; i++) {
nz = ai[i + 1] - ai[i];
PetscCall(PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz));
offset += nz;
loTriFactor->AA_h[offset] = 1.0;
offset += 1;
v += nz;
}
loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h + nzLower);
PetscCall(PetscLogCpuToGpu(nzLower * sizeof(PetscScalar)));
}
} catch (char *ex) {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
const PetscInt *aj = a->j, *adiag = a->diag, *vi;
const MatScalar *aa = a->a, *v;
PetscInt *AiUp, *AjUp;
PetscInt i, nz, nzUpper, offset;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(PETSC_SUCCESS);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* next, figure out the number of nonzeros in the upper triangular matrix. */
nzUpper = adiag[0] - adiag[n];
if (!upTriFactor) {
PetscScalar *AAUp;
PetscCallCUDA(cudaMallocHost((void **)&AAUp, nzUpper * sizeof(PetscScalar)));
/* Allocate Space for the upper triangular matrix */
PetscCallCUDA(cudaMallocHost((void **)&AiUp, (n + 1) * sizeof(PetscInt)));
PetscCallCUDA(cudaMallocHost((void **)&AjUp, nzUpper * sizeof(PetscInt)));
/* Fill the upper triangular matrix */
AiUp[0] = (PetscInt)0;
AiUp[n] = nzUpper;
offset = nzUpper;
for (i = n - 1; i >= 0; i--) {
v = aa + adiag[i + 1] + 1;
vi = aj + adiag[i + 1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i + 1] - 1;
/* decrement the offset */
offset -= (nz + 1);
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt)i;
AAUp[offset] = (MatScalar)1. / v[nz];
AiUp[i] = AiUp[i + 1] - (nz + 1);
PetscCall(PetscArraycpy(&(AjUp[offset + 1]), vi, nz));
PetscCall(PetscArraycpy(&(AAUp[offset + 1]), v, nz));
}
/* allocate space for the triangular factor information */
PetscCall(PetscNew(&upTriFactor));
upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
PetscCallCUSPARSE(cusparseCreateMatDescr(&upTriFactor->descr));
PetscCallCUSPARSE(cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
#else
PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
PetscCallCUSPARSE(cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER));
PetscCallCUSPARSE(cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT));
/* set the operation */
upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = n;
upTriFactor->csrMat->num_cols = n;
upTriFactor->csrMat->num_entries = nzUpper;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + n + 1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + nzUpper);
upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
upTriFactor->csrMat->values->assign(AAUp, AAUp + nzUpper);
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize));
PetscCallCUDA(cudaMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize));
#endif
/* perform the solve analysis */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor;
upTriFactor->AA_h = AAUp;
PetscCallCUDA(cudaFreeHost(AiUp));
PetscCallCUDA(cudaFreeHost(AjUp));
PetscCall(PetscLogCpuToGpu((n + 1 + nzUpper) * sizeof(int) + nzUpper * sizeof(PetscScalar)));
} else {
if (!upTriFactor->AA_h) PetscCallCUDA(cudaMallocHost((void **)&upTriFactor->AA_h, nzUpper * sizeof(PetscScalar)));
/* Fill the upper triangular matrix */
offset = nzUpper;
for (i = n - 1; i >= 0; i--) {
v = aa + adiag[i + 1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i + 1] - 1;
/* decrement the offset */
offset -= (nz + 1);
/* first, set the diagonal elements */
upTriFactor->AA_h[offset] = 1. / v[nz];
PetscCall(PetscArraycpy(&(upTriFactor->AA_h[offset + 1]), v, nz));
}
upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h + nzUpper);
PetscCall(PetscLogCpuToGpu(nzUpper * sizeof(PetscScalar)));
}
} catch (char *ex) {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
IS isrow = a->row, iscol = a->icol;
PetscBool row_identity, col_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscCall(MatSeqAIJCUSPARSEBuildFactoredMatrix_LU(A));
#else
PetscCall(MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A));
PetscCall(MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A));
if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n);
#endif
cusparseTriFactors->nnz = a->nz;
A->offloadmask = PETSC_OFFLOAD_BOTH; // factored matrix is sync'ed to GPU
/* lower triangular indices */
PetscCall(ISIdentity(isrow, &row_identity));
if (!row_identity && !cusparseTriFactors->rpermIndices) {
const PetscInt *r;
PetscCall(ISGetIndices(isrow, &r));
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(r, r + n);
PetscCall(ISRestoreIndices(isrow, &r));
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
}
/* upper triangular indices */
PetscCall(ISIdentity(iscol, &col_identity));
if (!col_identity && !cusparseTriFactors->cpermIndices) {
const PetscInt *c;
PetscCall(ISGetIndices(iscol, &c));
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(c, c + n);
PetscCall(ISRestoreIndices(iscol, &c));
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
static PetscErrorCode MatSeqAIJCUSPARSEBuildFactoredMatrix_Cheolesky(Mat A)
{
Mat_SeqAIJ *a = static_cast<Mat_SeqAIJ *>(A->data);
PetscInt m = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
const PetscInt *Ai = a->i, *Aj = a->j, *Adiag = a->diag;
const MatScalar *Aa = a->a;
PetscInt *Mj, Mnz;
PetscScalar *Ma, *D;
PetscFunctionBegin;
if (A->offloadmask == PETSC_OFFLOAD_CPU) { // A's latest factors are on CPU
if (!fs->csrRowPtr) { // Is it the first time doing the setup? Use csrRowPtr since it is not NULL even when m == 0
// Re-arrange the (skewed) factored matrix and put the result into M, a regular csr matrix on host.
// See comments at MatICCFactorSymbolic_SeqAIJ() on the layout of the factored matrix (U) on host.
Mnz = Ai[m]; // Unz (with the unit diagonal)
PetscCall(PetscMalloc1(Mnz, &Ma));
PetscCall(PetscMalloc1(Mnz, &Mj)); // Mj[] is temp
PetscCall(PetscMalloc1(m, &D)); // the diagonal
for (PetscInt i = 0; i < m; i++) {
PetscInt ulen = Ai[i + 1] - Ai[i];
Mj[Ai[i]] = i; // diagonal entry
PetscCall(PetscArraycpy(Mj + Ai[i] + 1, Aj + Ai[i], ulen - 1)); // entries of U on the right of the diagonal
}
// Copy M (U) from host to device
PetscCallCUDA(cudaMalloc(&fs->csrRowPtr, sizeof(*(fs->csrRowPtr)) * (m + 1)));
PetscCallCUDA(cudaMalloc(&fs->csrColIdx, sizeof(*(fs->csrColIdx)) * Mnz));
PetscCallCUDA(cudaMalloc(&fs->csrVal, sizeof(*(fs->csrVal)) * Mnz));
PetscCallCUDA(cudaMalloc(&fs->diag, sizeof(*(fs->diag)) * m));
PetscCallCUDA(cudaMemcpy(fs->csrRowPtr, Ai, sizeof(*Ai) * (m + 1), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(fs->csrColIdx, Mj, sizeof(*Mj) * Mnz, cudaMemcpyHostToDevice));
// Create descriptors for L, U. See https://docs.nvidia.com/cuda/cusparse/index.html#cusparseDiagType_t
// cusparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
// assumed to be present, but if CUSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
// all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
// assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
cusparseFillMode_t fillMode = CUSPARSE_FILL_MODE_UPPER;
cusparseDiagType_t diagType = CUSPARSE_DIAG_TYPE_UNIT; // U is unit diagonal
const cusparseIndexType_t indexType = PetscDefined(USE_64BIT_INDICES) ? CUSPARSE_INDEX_64I : CUSPARSE_INDEX_32I;
PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_U, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
// Allocate work vectors in SpSv
PetscCallCUDA(cudaMalloc((void **)&fs->X, sizeof(*(fs->X)) * m));
PetscCallCUDA(cudaMalloc((void **)&fs->Y, sizeof(*(fs->Y)) * m));
PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));
// Query buffer sizes for SpSV and then allocate buffers
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U));
PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Ut)); // Ut solve uses the same matrix (spMatDescr_U), but different descr and buffer
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, &fs->spsvBufferSize_Ut));
PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_Ut, fs->spsvBufferSize_Ut));
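// Note: the Ut solve reuses the same sparse matrix descriptor spMatDescr_U; only the SpSV descriptor and its work
// buffer differ, since cusparseSpSV keeps its analysis state in the SpSV descriptor rather than in the matrix descriptor.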
// Record for reuse
fs->csrVal_h = Ma;
fs->diag_h = D;
PetscCall(PetscFree(Mj));
}
// Copy the value
Ma = fs->csrVal_h;
D = fs->diag_h;
Mnz = Ai[m];
for (PetscInt i = 0; i < m; i++) {
D[i] = Aa[Adiag[i]]; // actually Aa[Adiag[i]] is the inverse of the diagonal
Ma[Ai[i]] = (MatScalar)1.0; // set the unit diagonal, which is cosmetic since cusparse does not really read it given CUSPARSE_DIAG_TYPE_UNIT
for (PetscInt k = 0; k < Ai[i + 1] - Ai[i] - 1; k++) Ma[Ai[i] + 1 + k] = -Aa[Ai[i] + k];
}
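// At this point Ma holds U row by row (a cosmetic unit diagonal followed by the sign-flipped off-diagonal entries of
// the host factor) and D holds the inverted diagonal taken from Aa[Adiag[i]].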
PetscCallCUDA(cudaMemcpy(fs->csrVal, Ma, sizeof(*Ma) * Mnz, cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(fs->diag, D, sizeof(*D) * m, cudaMemcpyHostToDevice));
// Do cusparseSpSV_analysis(), which is numeric and requires valid and up-to-date matrix values
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U));
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, fs->spsvBuffer_Ut));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
// Solve Ut D U x = b
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_Cholesky(Mat A, Vec b, Vec x)
{
Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
Mat_SeqAIJ *aij = static_cast<Mat_SeqAIJ *>(A->data);
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
const cusparseSpSVAlg_t alg = CUSPARSE_SPSV_ALG_DEFAULT;
PetscInt m = A->rmap->n;
PetscFunctionBegin;
PetscCall(PetscLogGpuTimeBegin());
PetscCall(VecCUDAGetArrayWrite(x, &xarray));
PetscCall(VecCUDAGetArrayRead(b, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
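// Overall pipeline: gather b with the row permutation (if any), solve Ut y = b', scale y by the (already inverted)
// diagonal D, solve U x' = y, then gather x' with the column permutation into x.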
// Reorder b with the row permutation if needed, and wrap the result in fs->X
if (fs->rpermIndices) {
PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X)));
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
}
// Solve Ut Y = X
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut));
// Solve diag(D) Z = Y. Actually just do Y = Y*D since D is already inverted in MatCholeskyFactorNumeric_SeqAIJ().
// It is basically a vector element-wise multiplication, but cublas does not have it!
PetscCallThrust(thrust::transform(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::device_pointer_cast(fs->Y), thrust::device_pointer_cast(fs->Y + m), thrust::device_pointer_cast(fs->diag), thrust::device_pointer_cast(fs->Y), thrust::multiplies<PetscScalar>()));
// Solve U X = Y
if (fs->cpermIndices) { // if need to permute, we need to use the intermediate buffer X
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, xarray));
}
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_U));
// Reorder X with the column permutation if needed, and put the result back to x
if (fs->cpermIndices) {
PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()),
thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU));
}
PetscCall(VecCUDARestoreArrayRead(b, &barray));
PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(4.0 * aij->nz - A->rmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
#else
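/* Legacy path (CUDA < 11.4): build explicit upper/lower triangular factor structures and use the csrsv-based
   triangular solves below instead of the SpSV API. */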
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
PetscInt *AiUp, *AjUp;
PetscScalar *AAUp;
PetscScalar *AALo;
PetscInt nzUpper = a->nz, n = A->rmap->n, i, offset, nz, j;
Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ *)A->data;
const PetscInt *ai = b->i, *aj = b->j, *vj;
const MatScalar *aa = b->a, *v;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(PETSC_SUCCESS);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
PetscCallCUDA(cudaMallocHost((void **)&AAUp, nzUpper * sizeof(PetscScalar)));
PetscCallCUDA(cudaMallocHost((void **)&AALo, nzUpper * sizeof(PetscScalar)));
if (!upTriFactor && !loTriFactor) {
/* Allocate Space for the upper triangular matrix */
PetscCallCUDA(cudaMallocHost((void **)&AiUp, (n + 1) * sizeof(PetscInt)));
PetscCallCUDA(cudaMallocHost((void **)&AjUp, nzUpper * sizeof(PetscInt)));
/* Fill the upper triangular matrix */
AiUp[0] = (PetscInt)0;
AiUp[n] = nzUpper;
offset = 0;
for (i = 0; i < n; i++) {
/* set the pointers */
v = aa + ai[i];
vj = aj + ai[i];
nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt)i;
AAUp[offset] = (MatScalar)1.0 / v[nz];
AiUp[i] = offset;
AALo[offset] = (MatScalar)1.0 / v[nz];
offset += 1;
if (nz > 0) {
PetscCall(PetscArraycpy(&(AjUp[offset]), vj, nz));
PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz));
for (j = offset; j < offset + nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j] / v[nz];
}
offset += nz;
}
}
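/* AAUp now holds U with 1/d(i) on the diagonal and negated off-diagonal entries; AALo shares the same (upper) CSR
   pattern with its off-diagonals additionally scaled by 1/d(i), so the lower triangular solve can reuse this pattern
   with a transposed operation (see loTriFactor below). */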
/* allocate space for the triangular factor information */
PetscCall(PetscNew(&upTriFactor));
upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
PetscCallCUSPARSE(cusparseCreateMatDescr(&upTriFactor->descr));
PetscCallCUSPARSE(cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
#else
PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
PetscCallCUSPARSE(cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER));
PetscCallCUSPARSE(cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT));
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = A->rmap->n;
upTriFactor->csrMat->num_cols = A->cmap->n;
upTriFactor->csrMat->num_entries = a->nz;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz);
upTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz);
/* set the operation */
upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize));
PetscCallCUDA(cudaMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize));
#endif
/* perform the solve analysis */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor;
/* allocate space for the triangular factor information */
PetscCall(PetscNew(&loTriFactor));
loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
PetscCallCUSPARSE(cusparseCreateMatDescr(&loTriFactor->descr));
PetscCallCUSPARSE(cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
#else
PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
PetscCallCUSPARSE(cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER));
PetscCallCUSPARSE(cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT));
/* set the operation */
loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = A->rmap->n;
loTriFactor->csrMat->num_cols = A->cmap->n;
loTriFactor->csrMat->num_entries = a->nz;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
loTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz);
loTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo + a->nz);
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize));
PetscCallCUDA(cudaMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize));
#endif
/* perform the solve analysis */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor;
PetscCall(PetscLogCpuToGpu(2 * (((A->rmap->n + 1) + (a->nz)) * sizeof(int) + (a->nz) * sizeof(PetscScalar))));
PetscCallCUDA(cudaFreeHost(AiUp));
PetscCallCUDA(cudaFreeHost(AjUp));
} else {
/* Fill the upper triangular matrix */
offset = 0;
for (i = 0; i < n; i++) {
/* set the pointers */
v = aa + ai[i];
nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AAUp[offset] = 1.0 / v[nz];
AALo[offset] = 1.0 / v[nz];
offset += 1;
if (nz > 0) {
PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz));
for (j = offset; j < offset + nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j] / v[nz];
}
offset += nz;
}
}
PetscCheck(upTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing upper triangular factor");
PetscCheck(loTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing lower triangular factor");
upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo + a->nz);
PetscCall(PetscLogCpuToGpu(2 * (a->nz) * sizeof(PetscScalar)));
}
PetscCallCUDA(cudaFreeHost(AAUp));
PetscCallCUDA(cudaFreeHost(AALo));
} catch (char *ex) {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
IS ip = a->row;
PetscBool perm_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscCall(MatSeqAIJCUSPARSEBuildFactoredMatrix_Cholesky(A));
#else
PetscCall(MatSeqAIJCUSPARSEBuildICCTriMatrices(A));
if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n);
#endif
cusparseTriFactors->nnz = (a->nz - n) * 2 + n;
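/* number of nonzeros of the complete factor (both triangles, with the diagonal counted once); used for flop logging
   in the solve routines */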
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
PetscCall(ISIdentity(ip, &perm_identity));
if (!perm_identity) {
IS iip;
const PetscInt *irip, *rip;
PetscCall(ISInvertPermutation(ip, PETSC_DECIDE, &iip));
PetscCall(ISGetIndices(iip, &irip));
PetscCall(ISGetIndices(ip, &rip));
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(rip, rip + n);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(irip, irip + n);
PetscCall(ISRestoreIndices(iip, &irip));
PetscCall(ISDestroy(&iip));
PetscCall(ISRestoreIndices(ip, &rip));
PetscCall(PetscLogCpuToGpu(2. * n * sizeof(PetscInt)));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info)
{
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
PetscCall(MatCholeskyFactorNumeric_SeqAIJ(B, A, info));
B->offloadmask = PETSC_OFFLOAD_CPU;
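/* the numeric Cholesky above ran on the host, so mark the factor CPU-resident; the GPU copy is (re)built by
   MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU() below */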
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
B->ops->solve = MatSolve_SeqAIJCUSPARSE_Cholesky;
B->ops->solvetranspose = MatSolve_SeqAIJCUSPARSE_Cholesky;
#else
/* determine which version of MatSolve needs to be used. */
Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
IS ip = b->row;
PetscBool perm_identity;
PetscCall(ISIdentity(ip, &perm_identity));
if (perm_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
}
#endif
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
/* get the triangular factors */
PetscCall(MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B));
PetscFunctionReturn(PETSC_SUCCESS);
}
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT;
cusparseIndexBase_t indexBase;
cusparseMatrixType_t matrixType;
cusparseFillMode_t fillMode;
cusparseDiagType_t diagType;
PetscFunctionBegin;
/* allocate space for the transpose of the lower triangular factor */
PetscCall(PetscNew(&loTriFactorT));
loTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the lower triangular factor */
matrixType = cusparseGetMatType(loTriFactor->descr);
indexBase = cusparseGetMatIndexBase(loTriFactor->descr);
fillMode = cusparseGetMatFillMode(loTriFactor->descr) == CUSPARSE_FILL_MODE_UPPER ? CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(loTriFactor->descr);
/* Create the matrix description */
PetscCallCUSPARSE(cusparseCreateMatDescr(&loTriFactorT->descr));
PetscCallCUSPARSE(cusparseSetMatIndexBase(loTriFactorT->descr, indexBase));
PetscCallCUSPARSE(cusparseSetMatType(loTriFactorT->descr, matrixType));
PetscCallCUSPARSE(cusparseSetMatFillMode(loTriFactorT->descr, fillMode));
PetscCallCUSPARSE(cusparseSetMatDiagType(loTriFactorT->descr, diagType));
/* set the operation */
loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the lower triangular factor*/
loTriFactorT->csrMat = new CsrMatrix;
loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols;
loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows;
loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries;
loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows + 1);
loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries);
loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries);
/* compute the transpose of the lower triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCallCUSPARSE(cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize));
PetscCallCUDA(cudaMalloc(&loTriFactor->csr2cscBuffer, loTriFactor->csr2cscBufferSize));
#endif
PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
{
// there is no clean way to have PetscCallCUSPARSE wrap this function...
auto stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer);
#else
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);
#endif
PetscCallCUSPARSE(stat);
}
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactorT->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, &loTriFactorT->solveBufferSize));
PetscCallCUDA(cudaMalloc(&loTriFactorT->solveBuffer, loTriFactorT->solveBufferSize));
#endif
/* perform the solve analysis */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;
/*********************************************/
/* Now the Transpose of the Upper Tri Factor */
/*********************************************/
/* allocate space for the transpose of the upper triangular factor */
PetscCall(PetscNew(&upTriFactorT));
upTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the upper triangular factor */
matrixType = cusparseGetMatType(upTriFactor->descr);
indexBase = cusparseGetMatIndexBase(upTriFactor->descr);
fillMode = cusparseGetMatFillMode(upTriFactor->descr) == CUSPARSE_FILL_MODE_UPPER ? CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(upTriFactor->descr);
/* Create the matrix description */
PetscCallCUSPARSE(cusparseCreateMatDescr(&upTriFactorT->descr));
PetscCallCUSPARSE(cusparseSetMatIndexBase(upTriFactorT->descr, indexBase));
PetscCallCUSPARSE(cusparseSetMatType(upTriFactorT->descr, matrixType));
PetscCallCUSPARSE(cusparseSetMatFillMode(upTriFactorT->descr, fillMode));
PetscCallCUSPARSE(cusparseSetMatDiagType(upTriFactorT->descr, diagType));
/* set the operation */
upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the upper triangular factor*/
upTriFactorT->csrMat = new CsrMatrix;
upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols;
upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows;
upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries;
upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows + 1);
upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries);
upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries);
/* compute the transpose of the upper triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCallCUSPARSE(cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize));
PetscCallCUDA(cudaMalloc(&upTriFactor->csr2cscBuffer, upTriFactor->csr2cscBufferSize));
#endif
PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
{
// there is no clean way to have PetscCallCUSPARSE wrap this function...
auto stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer);
#else
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);
#endif
PetscCallCUSPARSE(stat);
}
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
/* Create the solve analysis information */
PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactorT->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, &upTriFactorT->solveBufferSize));
PetscCallCUDA(cudaMalloc(&upTriFactorT->solveBuffer, upTriFactorT->solveBufferSize));
#endif
/* perform the solve analysis */
/* TODO: refactor this duplicated transpose-analysis setup into a shared helper function */
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, upTriFactorT->solvePolicy, upTriFactorT->solveBuffer));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
struct PetscScalarToPetscInt {
__host__ __device__ PetscInt operator()(PetscScalar s) { return (PetscInt)PetscRealPart(s); }
};
static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTranspose(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
cusparseStatus_t stat;
cusparseIndexBase_t indexBase;
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat;
PetscCheck(matstruct, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing mat struct");
matstructT = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose;
PetscCheck(!A->transupdated || matstructT, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing matTranspose struct");
if (A->transupdated) PetscFunctionReturn(PETSC_SUCCESS);
PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
PetscCall(PetscLogGpuTimeBegin());
if (cusparsestruct->format != MAT_CUSPARSE_CSR) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE));
if (!cusparsestruct->matTranspose) { /* create cusparse matrix */
matstructT = new Mat_SeqAIJCUSPARSEMultStruct;
PetscCallCUSPARSE(cusparseCreateMatDescr(&matstructT->descr));
indexBase = cusparseGetMatIndexBase(matstruct->descr);
PetscCallCUSPARSE(cusparseSetMatIndexBase(matstructT->descr, indexBase));
PetscCallCUSPARSE(cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
/* set alpha and beta */
PetscCallCUDA(cudaMalloc((void **)&(matstructT->alpha_one), sizeof(PetscScalar)));
PetscCallCUDA(cudaMalloc((void **)&(matstructT->beta_zero), sizeof(PetscScalar)));
PetscCallCUDA(cudaMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar)));
PetscCallCUDA(cudaMemcpy(matstructT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(matstructT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *matrixT = new CsrMatrix;
matstructT->mat = matrixT;
matrixT->num_rows = A->cmap->n;
matrixT->num_cols = A->rmap->n;
matrixT->num_entries = a->nz;
matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows + 1);
matrixT->column_indices = new THRUSTINTARRAY32(a->nz);
matrixT->values = new THRUSTARRAY(a->nz);
if (!cusparsestruct->rowoffsets_gpu) cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1);
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
#if PETSC_PKG_CUDA_VERSION_GE(11, 2, 1)
stat = cusparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */
indexBase, cusparse_scalartype);
PetscCallCUSPARSE(stat);
#else
/* cusparse-11.x returns errors with zero-sized matrices until 11.2.1,
see https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cusparse-11.2.1
I don't know what a proper value should be for matstructT->matDescr with empty matrices, so I just set
it to NULL to blow it up if one relies on it. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2,
when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I also set it accordingly.
*/
if (matrixT->num_entries) {
stat = cusparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, indexBase, cusparse_scalartype);
PetscCallCUSPARSE(stat);
} else {
matstructT->matDescr = NULL;
matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase);
}
#endif
#endif
} else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *temp = new CsrMatrix;
CsrMatrix *tempT = new CsrMatrix;
/* First convert HYB to CSR */
temp->num_rows = A->rmap->n;
temp->num_cols = A->cmap->n;
temp->num_entries = a->nz;
temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
temp->column_indices = new THRUSTINTARRAY32(a->nz);
temp->values = new THRUSTARRAY(a->nz);
stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, (cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get());
PetscCallCUSPARSE(stat);
/* Next, convert CSR to CSC (i.e. the matrix transpose) */
tempT->num_rows = A->rmap->n;
tempT->num_cols = A->cmap->n;
tempT->num_entries = a->nz;
tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
tempT->column_indices = new THRUSTINTARRAY32(a->nz);
tempT->values = new THRUSTARRAY(a->nz);
stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(),
tempT->column_indices->data().get(), tempT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);
PetscCallCUSPARSE(stat);
/* Last, convert CSC to HYB */
cusparseHybMat_t hybMat;
PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat));
cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition);
PetscCallCUSPARSE(stat);
/* assign the pointer */
matstructT->mat = hybMat;
A->transupdated = PETSC_TRUE;
/* delete temporaries */
if (tempT) {
if (tempT->values) delete (THRUSTARRAY *)tempT->values;
if (tempT->column_indices) delete (THRUSTINTARRAY32 *)tempT->column_indices;
if (tempT->row_offsets) delete (THRUSTINTARRAY32 *)tempT->row_offsets;
delete (CsrMatrix *)tempT;
}
if (temp) {
if (temp->values) delete (THRUSTARRAY *)temp->values;
if (temp->column_indices) delete (THRUSTINTARRAY32 *)temp->column_indices;
if (temp->row_offsets) delete (THRUSTINTARRAY32 *)temp->row_offsets;
delete (CsrMatrix *)temp;
}
#endif
}
}
if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */
CsrMatrix *matrix = (CsrMatrix *)matstruct->mat;
CsrMatrix *matrixT = (CsrMatrix *)matstructT->mat;
PetscCheck(matrix, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix");
PetscCheck(matrix->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix rows");
PetscCheck(matrix->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix cols");
PetscCheck(matrix->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix values");
PetscCheck(matrixT, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT");
PetscCheck(matrixT->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT rows");
PetscCheck(matrixT->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT cols");
PetscCheck(matrixT->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT values");
if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */
cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1);
PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt)));
}
if (!cusparsestruct->csr2csc_i) {
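/* Build csr2csc_i once: run csr2csc on the sequence 0,1,...,nnz-1 so that the transposed values give, for each CSC
   slot, the index of the corresponding CSR entry. Subsequent transposes of the values are then just a gather with
   this permutation (see the thrust::copy below). */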
THRUSTARRAY csr2csc_a(matrix->num_entries);
PetscCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0));
indexBase = cusparseGetMatIndexBase(matstruct->descr);
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
void *csr2cscBuffer;
size_t csr2cscBufferSize;
stat = cusparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, matrix->values->data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(),
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, &csr2cscBufferSize);
PetscCallCUSPARSE(stat);
PetscCallCUDA(cudaMalloc(&csr2cscBuffer, csr2cscBufferSize));
#endif
if (matrix->num_entries) {
/* When there are no nonzeros, this routine mistakenly returns CUSPARSE_STATUS_INVALID_VALUE in
mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. But CUDA-10 is OK.
I checked every parameter and they were just fine. I have no clue why cusparse complains.
Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[]
should be filled with indexBase. So I just take a shortcut here.
*/
stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, csr2csc_a.data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, csr2cscBuffer);
PetscCallCUSPARSE(stat);
#else
matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);
PetscCallCUSPARSE(stat);
#endif
} else {
matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase);
}
cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries);
PetscCallThrust(thrust::transform(thrust::device, matrixT->values->begin(), matrixT->values->end(), cusparsestruct->csr2csc_i->begin(), PetscScalarToPetscInt()));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCallCUDA(cudaFree(csr2cscBuffer));
#endif
}
PetscCallThrust(
thrust::copy(thrust::device, thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()), thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()), matrixT->values->begin()));
}
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
/* the compressed row indices are not used for matTranspose */
matstructT->cprowIndices = NULL;
/* assign the pointer */
((Mat_SeqAIJCUSPARSE *)A->spptr)->matTranspose = matstructT;
A->transupdated = PETSC_TRUE;
PetscFunctionReturn(PETSC_SUCCESS);
}
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_LU(Mat A, Vec b, Vec x)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
const Mat_SeqAIJ *aij = static_cast<Mat_SeqAIJ *>(A->data);
const cusparseOperation_t op = CUSPARSE_OPERATION_NON_TRANSPOSE;
const cusparseSpSVAlg_t alg = CUSPARSE_SPSV_ALG_DEFAULT;
PetscInt m = A->rmap->n;
PetscFunctionBegin;
PetscCall(PetscLogGpuTimeBegin());
PetscCall(VecCUDAGetArrayWrite(x, &xarray));
PetscCall(VecCUDAGetArrayRead(b, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
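// Overall pipeline: gather b with the row permutation (if any), solve L y = b', solve U x' = y, then gather x' with
// the column permutation into x.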
// Reorder b with the row permutation if needed, and wrap the result in fs->X
if (fs->rpermIndices) {
PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X)));
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
}
// Solve L Y = X
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
// Note that cusparseSpSV_solve() secretly uses the external buffer used in cusparseSpSV_analysis()!
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, op, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_L));
// Solve U X = Y
if (fs->cpermIndices) {
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, xarray));
}
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, op, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_U));
// Reorder X with the column permutation if needed, and put the result back to x
if (fs->cpermIndices) {
PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()),
thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU));
}
PetscCall(VecCUDARestoreArrayRead(b, &barray));
PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * aij->nz - m));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_LU(Mat A, Vec b, Vec x)
{
Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
Mat_SeqAIJ *aij = static_cast<Mat_SeqAIJ *>(A->data);
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
const cusparseOperation_t opA = CUSPARSE_OPERATION_TRANSPOSE;
const cusparseSpSVAlg_t alg = CUSPARSE_SPSV_ALG_DEFAULT;
PetscInt m = A->rmap->n;
PetscFunctionBegin;
PetscCall(PetscLogGpuTimeBegin());
if (!fs->createdTransposeSpSVDescr) { // Call MatSolveTranspose() for the first time
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* The matrix is still L. We only do transpose solve with it */
fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Ut));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut, &fs->spsvBufferSize_Ut));
PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt));
PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_Ut, fs->spsvBufferSize_Ut));
fs->createdTransposeSpSVDescr = PETSC_TRUE;
}
if (!fs->updatedTransposeSpSVAnalysis) {
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Lt, fs->spsvBuffer_Lt));
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut, fs->spsvBuffer_Ut));
fs->updatedTransposeSpSVAnalysis = PETSC_TRUE;
}
PetscCall(VecCUDAGetArrayWrite(x, &xarray));
PetscCall(VecCUDAGetArrayRead(b, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
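// For the transposed system we apply the factors in reverse order: solve Ut y = b' first and then Lt x' = y
// (compare with MatSolve_SeqAIJCUSPARSE_LU(), which solves L then U).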
// Reorder b with the row permutation if needed, and wrap the result in fs->X
if (fs->rpermIndices) {
PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X)));
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
}
// Solve Ut Y = X
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut));
// Solve Lt X = Y
if (fs->cpermIndices) { // if need to permute, we need to use the intermediate buffer X
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
} else {
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, xarray));
}
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_Lt));
// Reorder X with the column permutation if needed, and put the result back to x
if (fs->cpermIndices) {
PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()),
thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU));
}
PetscCall(VecCUDARestoreArrayRead(b, &barray));
PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * aij->nz - A->rmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
#else
/* Why do we need to analyze the transposed matrix again? Can't we just use op(A) = CUSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx)
{
PetscInt n = xx->map->n;
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A));
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
PetscCall(VecCUDAGetArrayRead(bb, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
PetscCall(PetscLogGpuTimeBegin());
/* First, reorder with the row permutation */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU + n, cusparseTriFactors->rpermIndices->end()), xGPU);
/* First, solve U */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer));
/* Then, solve L */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer));
/* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU + n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin());
/* Copy the temporary to the full solution. */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), tempGPU->begin(), tempGPU->end(), xGPU);
/* restore */
PetscCall(VecCUDARestoreArrayRead(bb, &barray));
PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A));
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
PetscCall(VecCUDAGetArrayRead(bb, &barray));
PetscCall(PetscLogGpuTimeBegin());
/* First, solve U */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, barray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer));
/* Then, solve L */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer));
/* restore */
PetscCall(VecCUDARestoreArrayRead(bb, &barray));
PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector;
PetscFunctionBegin;
/* Get the GPU pointers */
PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
PetscCall(VecCUDAGetArrayRead(bb, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
PetscCall(PetscLogGpuTimeBegin());
/* First, reorder with the row permutation */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin());
/* Next, solve L */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, tempGPU->data().get(), xarray, loTriFactor->solvePolicy, loTriFactor->solveBuffer));
/* Then, solve U */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, xarray, tempGPU->data().get(), upTriFactor->solvePolicy, upTriFactor->solveBuffer));
/* Last, reorder with the column permutation */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU);
PetscCall(VecCUDARestoreArrayRead(bb, &barray));
PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector;
PetscFunctionBegin;
/* Get the GPU pointers */
PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
PetscCall(VecCUDAGetArrayRead(bb, &barray));
PetscCall(PetscLogGpuTimeBegin());
/* First, solve L */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, barray, tempGPU->data().get(), loTriFactor->solvePolicy, loTriFactor->solveBuffer));
/* Next, solve U */
PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), xarray, upTriFactor->solvePolicy, upTriFactor->solveBuffer));
PetscCall(VecCUDARestoreArrayRead(bb, &barray));
PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
static PetscErrorCode MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, const MatFactorInfo *)
{
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data;
Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *Acsr;
PetscInt m, nz;
PetscBool flg;
PetscFunctionBegin;
if (PetscDefined(USE_DEBUG)) {
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name);
}
/* Copy A's value to fact */
m = fact->rmap->n;
nz = aij->nz;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
Acsr = (CsrMatrix *)Acusp->mat->mat;
PetscCallCUDA(cudaMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream));
/* Factorize fact inplace */
if (m)
PetscCallCUSPARSE(cusparseXcsrilu02(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */
fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M));
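/* csrilu02 factors in place: per the cuSPARSE documentation, the unit-diagonal L and U end up packed together in
   fs->csrVal on A's sparsity pattern, which is the storage the spMatDescr_L/spMatDescr_U solves below operate on. */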
if (PetscDefined(USE_DEBUG)) {
int numerical_zero;
cusparseStatus_t status;
status = cusparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &numerical_zero);
PetscAssert(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csrilu02: A(%d,%d) is zero", numerical_zero, numerical_zero);
}
/* cusparseSpSV_analysis() is numeric, i.e., it requires valid matrix values, therefore, we do it after cusparseXcsrilu02()
See discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/78
*/
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L));
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U));
/* L and U values have changed, so reset the flag to indicate we need to redo cusparseSpSV_analysis() for the transpose solve */
fs->updatedTransposeSpSVAnalysis = PETSC_FALSE;
fact->offloadmask = PETSC_OFFLOAD_GPU;
fact->ops->solve = MatSolve_SeqAIJCUSPARSE_LU; // spMatDescr_L/U uses 32-bit indices, but cusparseSpSV_solve() supports both 32 and 64. The info is encoded in cusparseSpMatDescr_t.
fact->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_LU;
fact->ops->matsolve = NULL;
fact->ops->matsolvetranspose = NULL;
PetscCall(PetscLogGpuFlops(fs->numericFactFlops));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, IS, IS, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data;
PetscInt m, nz;
PetscFunctionBegin;
if (PetscDefined(USE_DEBUG)) {
PetscInt i;
PetscBool flg, missing;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name);
PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n);
PetscCall(MatMissingDiagonal(A, &missing, &i));
PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, i);
}
/* Free the old stale stuff */
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs));
/* Copy over A's metadata to fact. Note that we also allocate fact's i,j,a on the host,
but they will not be used; we allocate them only to ease debugging.
*/
PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/));
fact->offloadmask = PETSC_OFFLOAD_BOTH;
fact->factortype = MAT_FACTOR_ILU;
fact->info.factor_mallocs = 0;
fact->info.fill_ratio_given = info->fill;
fact->info.fill_ratio_needed = 1.0;
aij->row = NULL;
aij->col = NULL;
/* ====================================================================== */
/* Copy A's i, j to fact and also allocate the value array of fact. */
/* We'll do in-place factorization on fact */
/* ====================================================================== */
const int *Ai, *Aj;
m = fact->rmap->n;
nz = aij->nz;
PetscCallCUDA(cudaMalloc((void **)&fs->csrRowPtr32, sizeof(*(fs->csrRowPtr32)) * (m + 1)));
PetscCallCUDA(cudaMalloc((void **)&fs->csrColIdx32, sizeof(*(fs->csrColIdx32)) * nz));
PetscCallCUDA(cudaMalloc((void **)&fs->csrVal, sizeof(*(fs->csrVal)) * nz));
PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai. The returned Ai, Aj are 32-bit */
PetscCallCUDA(cudaMemcpyAsync(fs->csrRowPtr32, Ai, sizeof(*Ai) * (m + 1), cudaMemcpyDeviceToDevice, PetscDefaultCudaStream));
PetscCallCUDA(cudaMemcpyAsync(fs->csrColIdx32, Aj, sizeof(*Aj) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream));
/* ====================================================================== */
/* Create descriptors for M, L, U */
/* ====================================================================== */
cusparseFillMode_t fillMode;
cusparseDiagType_t diagType;
PetscCallCUSPARSE(cusparseCreateMatDescr(&fs->matDescr_M));
PetscCallCUSPARSE(cusparseSetMatIndexBase(fs->matDescr_M, CUSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(cusparseSetMatType(fs->matDescr_M, CUSPARSE_MATRIX_TYPE_GENERAL));
/* https://docs.nvidia.com/cuda/cusparse/index.html#cusparseDiagType_t
cusparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
assumed to be present, but if CUSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
*/
fillMode = CUSPARSE_FILL_MODE_LOWER;
diagType = CUSPARSE_DIAG_TYPE_UNIT;
PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
fillMode = CUSPARSE_FILL_MODE_UPPER;
diagType = CUSPARSE_DIAG_TYPE_NON_UNIT;
PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_U, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
/* ========================================================================= */
/* Query buffer sizes for csrilu0, SpSV and allocate buffers */
/* ========================================================================= */
PetscCallCUSPARSE(cusparseCreateCsrilu02Info(&fs->ilu0Info_M));
if (m)
PetscCallCUSPARSE(cusparseXcsrilu02_bufferSize(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */
fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, &fs->factBufferSize_M));
PetscCallCUDA(cudaMalloc((void **)&fs->X, sizeof(PetscScalar) * m));
PetscCallCUDA(cudaMalloc((void **)&fs->Y, sizeof(PetscScalar) * m));
PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U));
/* From my experiment with the example at https://github.com/NVIDIA/CUDALibrarySamples/tree/master/cuSPARSE/bicgstab,
and discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/77,
spsvBuffer_L/U cannot be shared (i.e., be the same buffer) in our case, but factBuffer_M can be shared with either of spsvBuffer_L/U.
To save memory, we let factBuffer_M share storage with the bigger of spsvBuffer_L/U.
*/
if (fs->spsvBufferSize_L > fs->spsvBufferSize_U) {
PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M)));
fs->spsvBuffer_L = fs->factBuffer_M;
PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U));
} else {
PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_U, (size_t)fs->factBufferSize_M)));
fs->spsvBuffer_U = fs->factBuffer_M;
PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L));
}
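/* An illustrative sizing trace (hypothetical numbers, just walking through the branch above): with spsvBufferSize_L = 8 MB,
spsvBufferSize_U = 4 MB and factBufferSize_M = 6 MB, the first branch allocates factBuffer_M with max(8, 6) = 8 MB and
lets it double as spsvBuffer_L, while spsvBuffer_U gets its own 4 MB allocation. */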
/* ========================================================================== */
/* Perform analysis of ilu0 on M, SpSv on L and U */
/* The lower(upper) triangular part of M has the same sparsity pattern as L(U)*/
/* ========================================================================== */
int structural_zero;
cusparseStatus_t status;
fs->policy_M = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
if (m)
PetscCallCUSPARSE(cusparseXcsrilu02_analysis(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */
fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M));
if (PetscDefined(USE_DEBUG)) {
/* Function cusparseXcsrilu02_zeroPivot() is a blocking call. It calls cudaDeviceSynchronize() to make sure all previous kernels are done. */
status = cusparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &structural_zero);
PetscCheck(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csrilu02: A(%d,%d) is missing", structural_zero, structural_zero);
}
/* Estimate FLOPs of the numeric factorization */
{
Mat_SeqAIJ *Aseq = (Mat_SeqAIJ *)A->data;
PetscInt *Ai, *Adiag, nzRow, nzLeft;
PetscLogDouble flops = 0.0;
PetscCall(MatMarkDiagonal_SeqAIJ(A));
Ai = Aseq->i;
Adiag = Aseq->diag;
for (PetscInt i = 0; i < m; i++) {
if (Ai[i] < Adiag[i] && Adiag[i] < Ai[i + 1]) { /* There are nonzeros left to the diagonal of row i */
nzRow = Ai[i + 1] - Ai[i];
nzLeft = Adiag[i] - Ai[i];
/* We eliminate the nonzeros to the left of the diagonal one by one. Assume that each elimination updates the nonzeros
to the right of, and including, the eliminated one, and that each update costs a multiplication and an addition.
*/
nzLeft = (nzRow - 1) / 2;
flops += nzLeft * (2.0 * nzRow - nzLeft + 1);
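/* A worked instance of the estimate above (tracing the two statements right before this comment, not an exact
operation count): a row with nzRow = 5 stored entries gives nzLeft = (5 - 1) / 2 = 2 and contributes
2 * (2.0 * 5 - 2 + 1) = 18 flops. */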
}
}
fs->numericFactFlops = flops;
}
fact->ops->lufactornumeric = MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_ICC0(Mat fact, Vec b, Vec x)
{
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data;
const PetscScalar *barray;
PetscScalar *xarray;
PetscFunctionBegin;
PetscCall(VecCUDAGetArrayWrite(x, &xarray));
PetscCall(VecCUDAGetArrayRead(b, &barray));
PetscCall(PetscLogGpuTimeBegin());
/* Solve L*y = b */
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* L Y = X */
fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L));
/* Solve Lt*x = y */
PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, xarray));
PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* Lt X = Y */
fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt));
PetscCall(VecCUDARestoreArrayRead(b, &barray));
PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(2.0 * aij->nz - fact->rmap->n));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, const MatFactorInfo *)
{
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data;
Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *Acsr;
PetscInt m, nz;
PetscBool flg;
PetscFunctionBegin;
if (PetscDefined(USE_DEBUG)) {
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name);
}
/* Copy A's value to fact */
m = fact->rmap->n;
nz = aij->nz;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
Acsr = (CsrMatrix *)Acusp->mat->mat;
PetscCallCUDA(cudaMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream));
/* Factorize fact inplace */
/* https://docs.nvidia.com/cuda/cusparse/index.html#csric02_solve
Function csric02() only takes the lower triangular part of matrix A to perform factorization.
The matrix type must be CUSPARSE_MATRIX_TYPE_GENERAL, the fill mode and diagonal type are ignored,
and the strictly upper triangular part is ignored and never touched. It does not matter if A is Hermitian or not.
In other words, from the point of view of csric02() A is Hermitian and only the lower triangular part is provided.
*/
if (m) PetscCallCUSPARSE(cusparseXcsric02(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M));
if (PetscDefined(USE_DEBUG)) {
int numerical_zero;
cusparseStatus_t status;
status = cusparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &numerical_zero);
PetscAssert(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csric02: A(%d,%d) is zero", numerical_zero, numerical_zero);
}
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L));
/* Note that cusparse reports this error if we use double and CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE
** On entry to cusparseSpSV_analysis(): conjugate transpose (opA) is not supported for matA data type, current -> CUDA_R_64F
*/
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, fs->spsvBuffer_Lt));
fact->offloadmask = PETSC_OFFLOAD_GPU;
fact->ops->solve = MatSolve_SeqAIJCUSPARSE_ICC0;
fact->ops->solvetranspose = MatSolve_SeqAIJCUSPARSE_ICC0;
fact->ops->matsolve = NULL;
fact->ops->matsolvetranspose = NULL;
PetscCall(PetscLogGpuFlops(fs->numericFactFlops));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, IS, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data;
PetscInt m, nz;
PetscFunctionBegin;
if (PetscDefined(USE_DEBUG)) {
PetscInt i;
PetscBool flg, missing;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name);
PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n);
PetscCall(MatMissingDiagonal(A, &missing, &i));
PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, i);
}
/* Free the old stale stuff */
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs));
/* Copy over A's metadata to fact. Note that we also allocate fact's i,j,a on the host,
but they will not be used; we allocate them only to ease debugging.
*/
PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/));
fact->offloadmask = PETSC_OFFLOAD_BOTH;
fact->factortype = MAT_FACTOR_ICC;
fact->info.factor_mallocs = 0;
fact->info.fill_ratio_given = info->fill;
fact->info.fill_ratio_needed = 1.0;
aij->row = NULL;
aij->col = NULL;
/* ====================================================================== */
/* Copy A's i, j to fact and also allocate the value array of fact. */
/* We'll do in-place factorization on fact */
/* ====================================================================== */
const int *Ai, *Aj;
m = fact->rmap->n;
nz = aij->nz;
PetscCallCUDA(cudaMalloc((void **)&fs->csrRowPtr32, sizeof(*(fs->csrRowPtr32)) * (m + 1)));
PetscCallCUDA(cudaMalloc((void **)&fs->csrColIdx32, sizeof(*(fs->csrColIdx32)) * nz));
PetscCallCUDA(cudaMalloc((void **)&fs->csrVal, sizeof(PetscScalar) * nz));
PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai */
PetscCallCUDA(cudaMemcpyAsync(fs->csrRowPtr32, Ai, sizeof(*Ai) * (m + 1), cudaMemcpyDeviceToDevice, PetscDefaultCudaStream));
PetscCallCUDA(cudaMemcpyAsync(fs->csrColIdx32, Aj, sizeof(*Aj) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream));
/* ====================================================================== */
/* Create mat descriptors for M, L */
/* ====================================================================== */
cusparseFillMode_t fillMode;
cusparseDiagType_t diagType;
PetscCallCUSPARSE(cusparseCreateMatDescr(&fs->matDescr_M));
PetscCallCUSPARSE(cusparseSetMatIndexBase(fs->matDescr_M, CUSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(cusparseSetMatType(fs->matDescr_M, CUSPARSE_MATRIX_TYPE_GENERAL));
/* https://docs.nvidia.com/cuda/cusparse/index.html#cusparseDiagType_t
cusparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
assumed to be present, but if CUSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
*/
fillMode = CUSPARSE_FILL_MODE_LOWER;
diagType = CUSPARSE_DIAG_TYPE_NON_UNIT;
PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));
/* ========================================================================= */
/* Query buffer sizes for csric0, SpSV of L and Lt, and allocate buffers */
/* ========================================================================= */
PetscCallCUSPARSE(cusparseCreateCsric02Info(&fs->ic0Info_M));
if (m) PetscCallCUSPARSE(cusparseXcsric02_bufferSize(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, &fs->factBufferSize_M));
PetscCallCUDA(cudaMalloc((void **)&fs->X, sizeof(PetscScalar) * m));
PetscCallCUDA(cudaMalloc((void **)&fs->Y, sizeof(PetscScalar) * m));
PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L));
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt));
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt));
/* To save device memory, we let the factorization buffer share storage with one of the solver buffers.
See also comments in MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0().
*/
if (fs->spsvBufferSize_L > fs->spsvBufferSize_Lt) {
PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M)));
fs->spsvBuffer_L = fs->factBuffer_M;
PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt));
} else {
PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_Lt, (size_t)fs->factBufferSize_M)));
fs->spsvBuffer_Lt = fs->factBuffer_M;
PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L));
}
/* ========================================================================== */
/* Perform analysis of ic0 on M */
/* The lower triangular part of M has the same sparsity pattern as L */
/* ========================================================================== */
int structural_zero;
cusparseStatus_t status;
fs->policy_M = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
if (m) PetscCallCUSPARSE(cusparseXcsric02_analysis(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M));
if (PetscDefined(USE_DEBUG)) {
/* Function cusparseXcsric02_zeroPivot() is a blocking call. It calls cudaDeviceSynchronize() to make sure all previous kernels are done. */
status = cusparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &structural_zero);
PetscCheck(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csric02: A(%d,%d) is missing", structural_zero, structural_zero);
}
/* Estimate FLOPs of the numeric factorization */
{
Mat_SeqAIJ *Aseq = (Mat_SeqAIJ *)A->data;
PetscInt *Ai, nzRow, nzLeft;
PetscLogDouble flops = 0.0;
Ai = Aseq->i;
for (PetscInt i = 0; i < m; i++) {
nzRow = Ai[i + 1] - Ai[i];
if (nzRow > 1) {
/* We eliminate the nonzeros to the left of the diagonal one by one. Assume that each elimination updates the nonzeros
to the right of, and including, the eliminated one, and that each update costs a multiplication and an addition.
*/
nzLeft = (nzRow - 1) / 2;
flops += nzLeft * (2.0 * nzRow - nzLeft + 1);
}
}
fs->numericFactFlops = flops;
}
fact->ops->choleskyfactornumeric = MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0;
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info)
{
// use_cpu_solve is a field of Mat_SeqAIJCUSPARSE; B, being a factored matrix, carries a Mat_SeqAIJCUSPARSETriFactors in its spptr instead.
Mat_SeqAIJCUSPARSE *cusparsestruct = static_cast<Mat_SeqAIJCUSPARSE *>(A->spptr);
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
PetscCall(MatLUFactorNumeric_SeqAIJ(B, A, info));
B->offloadmask = PETSC_OFFLOAD_CPU;
if (!cusparsestruct->use_cpu_solve) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
B->ops->solve = MatSolve_SeqAIJCUSPARSE_LU;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_LU;
#else
/* determine which version of MatSolve needs to be used. */
Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
IS isrow = b->row, iscol = b->col;
PetscBool row_identity, col_identity;
PetscCall(ISIdentity(isrow, &row_identity));
PetscCall(ISIdentity(iscol, &col_identity));
if (row_identity && col_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
}
#endif
}
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
/* get the triangular factors */
if (!cusparsestruct->use_cpu_solve) PetscCall(MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(B->spptr);
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
PetscCall(MatLUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info));
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;
PetscFunctionBegin;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscBool row_identity = PETSC_FALSE, col_identity = PETSC_FALSE;
if (cusparseTriFactors->factorizeOnDevice) {
PetscCall(ISIdentity(isrow, &row_identity));
PetscCall(ISIdentity(iscol, &col_identity));
}
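/* Fast path: ILU(0) with the natural ordering (for instance -pc_factor_levels 0, the default, combined with the natural
ordering) is factorized and solved entirely on the device below; any fill level > 0, a non-identity permutation, or
factorization bound to the host falls through to the host symbolic routine. */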
if (!info->levels && row_identity && col_identity) {
PetscCall(MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(B, A, isrow, iscol, info));
} else
#endif
{
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
PetscCall(MatILUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info));
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;
PetscFunctionBegin;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscBool perm_identity = PETSC_FALSE;
if (cusparseTriFactors->factorizeOnDevice) PetscCall(ISIdentity(perm, &perm_identity));
if (!info->levels && perm_identity) {
PetscCall(MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(B, A, perm, info));
} else
#endif
{
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
PetscCall(MatICCFactorSymbolic_SeqAIJ(B, A, perm, info));
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
PetscCall(MatCholeskyFactorSymbolic_SeqAIJ(B, A, perm, info));
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat, MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
/*MC
MATSOLVERCUSPARSE = "cusparse" - A matrix solver type providing triangular solvers for sequential matrices
of type `MATSEQAIJCUSPARSE` on a single GPU. Currently supported
algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) result in poorer
performance in the triangular solves. Full LU and Cholesky decompositions can be solved through the
cuSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these
algorithms are not recommended. This class does NOT support direct solver operations.
Level: beginner
.seealso: [](chapter_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `PCFactorSetMatSolverType()`, `MatSolverType`, `MatCreateSeqAIJCUSPARSE()`,
`MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation`
M*/
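/* A minimal usage sketch (an illustration, not part of the manual page above): this solver can be selected either
from the command line, e.g.
-mat_type aijcusparse -pc_type ilu -pc_factor_mat_solver_type cusparse
or programmatically on a PC obtained from KSPGetPC() (the variable name `pc` below is hypothetical):
PetscCall(PCSetType(pc, PCILU));
PetscCall(PCFactorSetMatSolverType(pc, MATSOLVERCUSPARSE));
*/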
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A, MatFactorType ftype, Mat *B)
{
PetscInt n = A->rmap->n;
PetscBool factOnDevice, factOnHost;
char *prefix;
char factPlace[32] = "device"; /* the default */
PetscFunctionBegin;
PetscCall(MatCreate(PetscObjectComm((PetscObject)A), B));
PetscCall(MatSetSizes(*B, n, n, n, n));
(*B)->factortype = ftype; // factortype makes MatSetType() allocate spptr of type Mat_SeqAIJCUSPARSETriFactors
PetscCall(MatSetType(*B, MATSEQAIJCUSPARSE));
prefix = (*B)->factorprefix ? (*B)->factorprefix : ((PetscObject)A)->prefix;
PetscOptionsBegin(PetscObjectComm((PetscObject)(*B)), prefix, "MatGetFactor", "Mat");
PetscCall(PetscOptionsString("-mat_factor_bind_factorization", "Do matrix factorization on host or device when possible", "MatGetFactor", NULL, factPlace, sizeof(factPlace), NULL));
PetscOptionsEnd();
PetscCall(PetscStrcasecmp("device", factPlace, &factOnDevice));
PetscCall(PetscStrcasecmp("host", factPlace, &factOnHost));
PetscCheck(factOnDevice || factOnHost, PetscObjectComm((PetscObject)(*B)), PETSC_ERR_ARG_OUTOFRANGE, "Wrong option %s to -mat_factor_bind_factorization <string>. Only host and device are allowed", factPlace);
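/* For example, -mat_factor_bind_factorization host keeps the numeric factorization on the CPU (which also bypasses
the device ILU(0)/IC(0) fast paths above), while the triangular solves may still run on the GPU. */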
((Mat_SeqAIJCUSPARSETriFactors *)(*B)->spptr)->factorizeOnDevice = factOnDevice;
if (A->boundtocpu && A->bindingpropagates) PetscCall(MatBindToCPU(*B, PETSC_TRUE));
if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
PetscCall(MatSetBlockSizesFromMats(*B, A, A));
if (!A->boundtocpu) {
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE;
} else {
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJ;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJ;
}
PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_LU]));
PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILU]));
PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILUDT]));
} else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
if (!A->boundtocpu) {
(*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
} else {
(*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJ;
(*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJ;
}
PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY]));
PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ICC]));
} else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Factor type not supported for CUSPARSE Matrix Types");
PetscCall(MatSeqAIJSetPreallocation(*B, MAT_SKIP_ALLOCATION, NULL));
(*B)->canuseordering = PETSC_TRUE;
PetscCall(PetscObjectComposeFunction((PetscObject)(*B), "MatFactorGetSolverType_C", MatFactorGetSolverType_seqaij_cusparse));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
#endif
PetscFunctionBegin;
if (A->offloadmask == PETSC_OFFLOAD_GPU) {
PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0));
if (A->factortype == MAT_FACTOR_NONE) {
CsrMatrix *matrix = (CsrMatrix *)cusp->mat->mat;
PetscCallCUDA(cudaMemcpy(a->a, matrix->values->data().get(), a->nz * sizeof(PetscScalar), cudaMemcpyDeviceToHost));
}
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
else if (fs->csrVal) {
/* We have a factorized matrix on device and are able to copy it to host */
PetscCallCUDA(cudaMemcpy(a->a, fs->csrVal, a->nz * sizeof(PetscScalar), cudaMemcpyDeviceToHost));
}
#endif
else
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "No support for copying this type of factorized matrix from device to host");
PetscCall(PetscLogGpuToCpu(a->nz * sizeof(PetscScalar)));
PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0));
A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
*array = ((Mat_SeqAIJ *)A->data)->a;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJRestoreArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
PetscFunctionBegin;
A->offloadmask = PETSC_OFFLOAD_CPU;
*array = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJGetArrayRead_SeqAIJCUSPARSE(Mat A, const PetscScalar *array[])
{
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
*array = ((Mat_SeqAIJ *)A->data)->a;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE(Mat, const PetscScalar *array[])
{
PetscFunctionBegin;
*array = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
PetscFunctionBegin;
*array = ((Mat_SeqAIJ *)A->data)->a;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
PetscFunctionBegin;
A->offloadmask = PETSC_OFFLOAD_CPU;
*array = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE(Mat A, const PetscInt **i, const PetscInt **j, PetscScalar **a, PetscMemType *mtype)
{
Mat_SeqAIJCUSPARSE *cusp;
CsrMatrix *matrix;
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCheck(A->factortype == MAT_FACTOR_NONE, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "Not for factored matrix");
cusp = static_cast<Mat_SeqAIJCUSPARSE *>(A->spptr);
PetscCheck(cusp != NULL, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "cusp is NULL");
matrix = (CsrMatrix *)cusp->mat->mat;
if (i) {
#if !defined(PETSC_USE_64BIT_INDICES)
*i = matrix->row_offsets->data().get();
#else
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE does not support 64-bit indices");
#endif
}
if (j) {
#if !defined(PETSC_USE_64BIT_INDICES)
*j = matrix->column_indices->data().get();
#else
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE does not support 64-bit indices");
#endif
}
if (a) *a = matrix->values->data().get();
if (mtype) *mtype = PETSC_MEMTYPE_CUDA;
PetscFunctionReturn(PETSC_SUCCESS);
}
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscInt m = A->rmap->n, *ii, *ridx, tmp;
cusparseStatus_t stat;
PetscBool both = PETSC_TRUE;
PetscFunctionBegin;
PetscCheck(!A->boundtocpu, PETSC_COMM_SELF, PETSC_ERR_GPU, "Cannot copy to GPU");
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { /* Copy values only */
CsrMatrix *matrix;
matrix = (CsrMatrix *)cusparsestruct->mat->mat;
PetscCheck(!a->nz || a->a, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR values");
PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0));
matrix->values->assign(a->a, a->a + a->nz);
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogCpuToGpu((a->nz) * sizeof(PetscScalar)));
PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0));
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE));
} else {
PetscInt nnz;
PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0));
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat, cusparsestruct->format));
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE));
delete cusparsestruct->workVector;
delete cusparsestruct->rowoffsets_gpu;
cusparsestruct->workVector = NULL;
cusparsestruct->rowoffsets_gpu = NULL;
try {
if (a->compressedrow.use) {
m = a->compressedrow.nrows;
ii = a->compressedrow.i;
ridx = a->compressedrow.rindex;
} else {
m = A->rmap->n;
ii = a->i;
ridx = NULL;
}
PetscCheck(ii, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR row data");
if (!a->a) {
nnz = ii[m];
both = PETSC_FALSE;
} else nnz = a->nz;
PetscCheck(!nnz || a->j, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR column data");
/* create cusparse matrix */
cusparsestruct->nrows = m;
matstruct = new Mat_SeqAIJCUSPARSEMultStruct;
PetscCallCUSPARSE(cusparseCreateMatDescr(&matstruct->descr));
PetscCallCUSPARSE(cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
PetscCallCUDA(cudaMalloc((void **)&(matstruct->alpha_one), sizeof(PetscScalar)));
PetscCallCUDA(cudaMalloc((void **)&(matstruct->beta_zero), sizeof(PetscScalar)));
PetscCallCUDA(cudaMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar)));
PetscCallCUDA(cudaMemcpy(matstruct->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(matstruct->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCallCUSPARSE(cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE));
/* Build a hybrid/ellpack matrix if this option is chosen for the storage */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
/* set the matrix */
CsrMatrix *mat = new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m + 1);
mat->row_offsets->assign(ii, ii + m + 1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j + nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a + nnz);
/* assign the pointer */
matstruct->mat = mat;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
if (mat->num_rows) { /* cusparse errors on empty matrices! */
stat = cusparseCreateCsr(&matstruct->matDescr, mat->num_rows, mat->num_cols, mat->num_entries, mat->row_offsets->data().get(), mat->column_indices->data().get(), mat->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
}
#endif
} else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *mat = new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m + 1);
mat->row_offsets->assign(ii, ii + m + 1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j + nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a + nnz);
cusparseHybMat_t hybMat;
PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat));
cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), hybMat, 0, partition);
PetscCallCUSPARSE(stat);
/* assign the pointer */
matstruct->mat = hybMat;
if (mat) {
if (mat->values) delete (THRUSTARRAY *)mat->values;
if (mat->column_indices) delete (THRUSTINTARRAY32 *)mat->column_indices;
if (mat->row_offsets) delete (THRUSTINTARRAY32 *)mat->row_offsets;
delete (CsrMatrix *)mat;
}
#endif
}
/* assign the compressed row indices */
if (a->compressedrow.use) {
cusparsestruct->workVector = new THRUSTARRAY(m);
matstruct->cprowIndices = new THRUSTINTARRAY(m);
matstruct->cprowIndices->assign(ridx, ridx + m);
tmp = m;
} else {
cusparsestruct->workVector = NULL;
matstruct->cprowIndices = NULL;
tmp = 0;
}
PetscCall(PetscLogCpuToGpu(((m + 1) + (a->nz)) * sizeof(int) + tmp * sizeof(PetscInt) + (3 + (a->nz)) * sizeof(PetscScalar)));
/* assign the pointer */
cusparsestruct->mat = matstruct;
} catch (char *ex) {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
}
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0));
cusparsestruct->nonzerostate = A->nonzerostate;
}
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
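/* Thrust functors used below (presumably applied with thrust::for_each over zip iterators that pair a source range
with a destination range): element-wise accumulate, copy, and reverse-direction copy, respectively. */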
struct VecCUDAPlusEquals {
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t);
}
};
struct VecCUDAEquals {
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<0>(t);
}
};
struct VecCUDAEqualsReverse {
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
thrust::get<0>(t) = thrust::get<1>(t);
}
};
struct MatMatCusparse {
PetscBool cisdense;
PetscScalar *Bt;
Mat X;
PetscBool reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */
PetscLogDouble flops;
CsrMatrix *Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
cusparseSpMatDescr_t matSpBDescr;
PetscBool initialized; /* C = alpha op(A) op(B) + beta C */
cusparseDnMatDescr_t matBDescr;
cusparseDnMatDescr_t matCDescr;
PetscInt Blda, Clda; /* Record leading dimensions of B and C here to detect changes */
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
void *dBuffer4;
void *dBuffer5;
#endif
size_t mmBufferSize;
void *mmBuffer;
void *mmBuffer2; /* SpGEMM WorkEstimation buffer */
cusparseSpGEMMDescr_t spgemmDesc;
#endif
};
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
MatMatCusparse *mmdata = (MatMatCusparse *)data;
PetscFunctionBegin;
PetscCallCUDA(cudaFree(mmdata->Bt));
delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
if (mmdata->matSpBDescr) PetscCallCUSPARSE(cusparseDestroySpMat(mmdata->matSpBDescr));
if (mmdata->matBDescr) PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matBDescr));
if (mmdata->matCDescr) PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matCDescr));
if (mmdata->spgemmDesc) PetscCallCUSPARSE(cusparseSpGEMM_destroyDescr(mmdata->spgemmDesc));
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
if (mmdata->dBuffer4) PetscCallCUDA(cudaFree(mmdata->dBuffer4));
if (mmdata->dBuffer5) PetscCallCUDA(cudaFree(mmdata->dBuffer5));
#endif
if (mmdata->mmBuffer) PetscCallCUDA(cudaFree(mmdata->mmBuffer));
if (mmdata->mmBuffer2) PetscCallCUDA(cudaFree(mmdata->mmBuffer2));
#endif
PetscCall(MatDestroy(&mmdata->X));
PetscCall(PetscFree(data));
PetscFunctionReturn(PETSC_SUCCESS);
}
#include <../src/mat/impls/dense/seq/dense.h> // MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal()
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A, B;
PetscInt m, n, blda, clda;
PetscBool flg, biscuda;
Mat_SeqAIJCUSPARSE *cusp;
cusparseStatus_t stat;
cusparseOperation_t opA;
const PetscScalar *barray;
PetscScalar *carray;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSEMultStruct *mat;
CsrMatrix *csrmat;
PetscFunctionBegin;
MatCheckProduct(C, 1);
PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty");
mmdata = (MatMatCusparse *)product->data;
A = product->A;
B = product->B;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
/* Currently CopyToGpu does not copy if the matrix is bound to the CPU.
Instead of silently accepting a wrong answer, we prefer to raise an error */
PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_PtAP:
mat = cusp->mat;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
if (!A->form_explicit_transpose) {
mat = cusp->mat;
opA = CUSPARSE_OPERATION_TRANSPOSE;
} else {
PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
mat = cusp->matTranspose;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
}
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
case MATPRODUCT_RARt:
mat = cusp->mat;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
}
PetscCheck(mat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing Mat_SeqAIJCUSPARSEMultStruct");
csrmat = (CsrMatrix *)mat->mat;
/* if the user passed a CPU matrix, copy the data to the GPU */
PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQDENSECUDA, &biscuda));
if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSECUDA, MAT_INPLACE_MATRIX, &B));
PetscCall(MatDenseGetArrayReadAndMemType(B, &barray, nullptr));
PetscCall(MatDenseGetLDA(B, &blda));
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
PetscCall(MatDenseGetArrayWriteAndMemType(mmdata->X, &carray, nullptr));
PetscCall(MatDenseGetLDA(mmdata->X, &clda));
} else {
PetscCall(MatDenseGetArrayWriteAndMemType(C, &carray, nullptr));
PetscCall(MatDenseGetLDA(C, &clda));
}
PetscCall(PetscLogGpuTimeBegin());
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
cusparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
/* (re)allocate mmBuffer if not initialized or LDAs are different */
if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
size_t mmBufferSize;
if (mmdata->initialized && mmdata->Blda != blda) {
PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matBDescr));
mmdata->matBDescr = NULL;
}
if (!mmdata->matBDescr) {
PetscCallCUSPARSE(cusparseCreateDnMat(&mmdata->matBDescr, B->rmap->n, B->cmap->n, blda, (void *)barray, cusparse_scalartype, CUSPARSE_ORDER_COL));
mmdata->Blda = blda;
}
if (mmdata->initialized && mmdata->Clda != clda) {
PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matCDescr));
mmdata->matCDescr = NULL;
}
if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
PetscCallCUSPARSE(cusparseCreateDnMat(&mmdata->matCDescr, m, n, clda, (void *)carray, cusparse_scalartype, CUSPARSE_ORDER_COL));
mmdata->Clda = clda;
}
if (!mat->matDescr) {
stat = cusparseCreateCsr(&mat->matDescr, csrmat->num_rows, csrmat->num_cols, csrmat->num_entries, csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), csrmat->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
}
stat = cusparseSpMM_bufferSize(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, &mmBufferSize);
PetscCallCUSPARSE(stat);
if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
PetscCallCUDA(cudaFree(mmdata->mmBuffer));
PetscCallCUDA(cudaMalloc(&mmdata->mmBuffer, mmBufferSize));
mmdata->mmBufferSize = mmBufferSize;
}
mmdata->initialized = PETSC_TRUE;
} else {
/* to be safe, always update pointers of the mats */
PetscCallCUSPARSE(cusparseSpMatSetValues(mat->matDescr, csrmat->values->data().get()));
PetscCallCUSPARSE(cusparseDnMatSetValues(mmdata->matBDescr, (void *)barray));
PetscCallCUSPARSE(cusparseDnMatSetValues(mmdata->matCDescr, (void *)carray));
}
/* do cusparseSpMM, which supports transpose on B */
stat = cusparseSpMM(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, mmdata->mmBuffer);
PetscCallCUSPARSE(stat);
#else
PetscInt k;
/* cusparseXcsrmm does not support transpose on B */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
cublasHandle_t cublasv2handle;
cublasStatus_t cerr;
PetscCall(PetscCUBLASGetHandle(&cublasv2handle));
cerr = cublasXgeam(cublasv2handle, CUBLAS_OP_T, CUBLAS_OP_T, B->cmap->n, B->rmap->n, &PETSC_CUSPARSE_ONE, barray, blda, &PETSC_CUSPARSE_ZERO, barray, blda, mmdata->Bt, B->cmap->n);
PetscCallCUBLAS(cerr);
blda = B->cmap->n;
k = B->cmap->n;
} else {
k = B->rmap->n;
}
/* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
stat = cusparse_csr_spmm(cusp->handle, opA, m, n, k, csrmat->num_entries, mat->alpha_one, mat->descr, csrmat->values->data().get(), csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), mmdata->Bt ? mmdata->Bt : barray, blda, mat->beta_zero, carray, clda);
PetscCallCUSPARSE(stat);
#endif
PetscCall(PetscLogGpuTimeEnd());
PetscCall(PetscLogGpuFlops(n * 2.0 * csrmat->num_entries));
PetscCall(MatDenseRestoreArrayReadAndMemType(B, &barray));
if (product->type == MATPRODUCT_RARt) {
PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray));
PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_FALSE, PETSC_FALSE));
} else if (product->type == MATPRODUCT_PtAP) {
PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray));
PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_TRUE, PETSC_FALSE));
} else {
PetscCall(MatDenseRestoreArrayWriteAndMemType(C, &carray));
}
if (mmdata->cisdense) PetscCall(MatConvert(C, MATSEQDENSE, MAT_INPLACE_MATRIX, &C));
if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSE, MAT_INPLACE_MATRIX, &B));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A, B;
PetscInt m, n;
PetscBool cisdense, flg;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSE *cusp;
PetscFunctionBegin;
MatCheckProduct(C, 1);
PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty");
A = product->A;
B = product->B;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscCheck(cusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
switch (product->type) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
break;
case MATPRODUCT_PtAP:
m = B->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_RARt:
m = B->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
}
PetscCall(MatSetSizes(C, m, n, m, n));
/* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy the result back to the CPU */
PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQDENSE, &cisdense));
PetscCall(MatSetType(C, MATSEQDENSECUDA));
/* product data */
PetscCall(PetscNew(&mmdata));
mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11, 0, 0)
/* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) PetscCallCUDA(cudaMalloc((void **)&mmdata->Bt, (size_t)B->rmap->n * (size_t)B->cmap->n * sizeof(PetscScalar)));
#endif
/* for these products we need intermediate storage */
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
PetscCall(MatCreate(PetscObjectComm((PetscObject)C), &mmdata->X));
PetscCall(MatSetType(mmdata->X, MATSEQDENSECUDA));
if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
PetscCall(MatSetSizes(mmdata->X, A->rmap->n, B->rmap->n, A->rmap->n, B->rmap->n));
} else {
PetscCall(MatSetSizes(mmdata->X, A->rmap->n, B->cmap->n, A->rmap->n, B->cmap->n));
}
}
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A, B;
Mat_SeqAIJCUSPARSE *Acusp, *Bcusp, *Ccusp;
Mat_SeqAIJ *c = (Mat_SeqAIJ *)C->data;
Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat;
CsrMatrix *Acsr, *Bcsr, *Ccsr;
PetscBool flg;
cusparseStatus_t stat;
MatProductType ptype;
MatMatCusparse *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
cusparseSpMatDescr_t BmatSpDescr;
#endif
cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE, opB = CUSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */
PetscFunctionBegin;
MatCheckProduct(C, 1);
PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty");
PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for C of type %s", ((PetscObject)C)->type_name);
mmdata = (MatMatCusparse *)C->product->data;
A = product->A;
B = product->B;
if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have already been computed in the MatProductSymbolic phase */
mmdata->reusesym = PETSC_FALSE;
Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr;
PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
Cmat = Ccusp->mat;
PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[C->product->type]);
Ccsr = (CsrMatrix *)Cmat->mat;
PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct");
goto finalize;
}
if (!c->nz) goto finalize;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name);
PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
PetscCheck(!B->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr;
Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr;
PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));
ptype = product->type;
if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) {
ptype = MATPRODUCT_AB;
PetscCheck(product->symbolic_used_the_fact_A_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic should have been built using the fact that A is symmetric");
}
if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) {
ptype = MATPRODUCT_AB;
PetscCheck(product->symbolic_used_the_fact_B_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic should have been built using the fact that B is symmetric");
}
switch (ptype) {
case MATPRODUCT_AB:
Amat = Acusp->mat;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_AtB:
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_ABt:
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
break;
default:
SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
}
Cmat = Ccusp->mat;
PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]);
PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]);
PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[ptype]);
Acsr = (CsrMatrix *)Amat->mat;
Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix *)Bmat->mat; /* B may be in compressed row storage */
Ccsr = (CsrMatrix *)Cmat->mat;
PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct");
PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct");
PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct");
PetscCall(PetscLogGpuTimeBegin());
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE));
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
PetscCallCUSPARSE(stat);
#else
stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);
PetscCallCUSPARSE(stat);
stat = cusparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
PetscCallCUSPARSE(stat);
#endif
#else
stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries,
Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());
PetscCallCUSPARSE(stat);
#endif
PetscCall(PetscLogGpuFlops(mmdata->flops));
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogGpuTimeEnd());
C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
/* shorter version of MatAssemblyEnd_SeqAIJ */
PetscCall(PetscInfo(C, "Matrix size: %" PetscInt_FMT " X %" PetscInt_FMT "; storage space: 0 unneeded,%" PetscInt_FMT " used\n", C->rmap->n, C->cmap->n, c->nz));
PetscCall(PetscInfo(C, "Number of mallocs during MatSetValues() is 0\n"));
PetscCall(PetscInfo(C, "Maximum nonzeros in any row is %" PetscInt_FMT "\n", c->rmax));
c->reallocs = 0;
C->info.mallocs += 0;
C->info.nz_unneeded = 0;
C->assembled = C->was_assembled = PETSC_TRUE;
C->num_ass++;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A, B;
Mat_SeqAIJCUSPARSE *Acusp, *Bcusp, *Ccusp;
Mat_SeqAIJ *a, *b, *c;
Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat;
CsrMatrix *Acsr, *Bcsr, *Ccsr;
PetscInt i, j, m, n, k;
PetscBool flg;
cusparseStatus_t stat;
MatProductType ptype;
MatMatCusparse *mmdata;
PetscLogDouble flops;
PetscBool biscompressed, ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
int64_t C_num_rows1, C_num_cols1, C_nnz1;
cusparseSpMatDescr_t BmatSpDescr;
#else
int cnz;
#endif
cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE, opB = CUSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */
PetscFunctionBegin;
MatCheckProduct(C, 1);
PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty");
A = product->A;
B = product->B;
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg));
PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name);
a = (Mat_SeqAIJ *)A->data;
b = (Mat_SeqAIJ *)B->data;
/* product data */
PetscCall(PetscNew(&mmdata));
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));
Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; /* Access spptr after MatSeqAIJCUSPARSECopyToGPU, not before */
Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr;
PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
ptype = product->type;
if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) {
ptype = MATPRODUCT_AB;
product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE;
}
if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) {
ptype = MATPRODUCT_AB;
product->symbolic_used_the_fact_B_is_symmetric = PETSC_TRUE;
}
biscompressed = PETSC_FALSE;
ciscompressed = PETSC_FALSE;
switch (ptype) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
k = A->cmap->n;
Amat = Acusp->mat;
Bmat = Bcusp->mat;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
k = A->rmap->n;
PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
k = A->cmap->n;
PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B));
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
break;
default:
SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
}
/* create cusparse matrix */
PetscCall(MatSetSizes(C, m, n, m, n));
PetscCall(MatSetType(C, MATSEQAIJCUSPARSE));
c = (Mat_SeqAIJ *)C->data;
Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
c->compressedrow.use = ciscompressed;
  if (c->compressedrow.use) { /* if A is in compressed row format, then C will be in compressed row format as well */
c->compressedrow.nrows = a->compressedrow.nrows;
PetscCall(PetscMalloc2(c->compressedrow.nrows + 1, &c->compressedrow.i, c->compressedrow.nrows, &c->compressedrow.rindex));
PetscCall(PetscArraycpy(c->compressedrow.rindex, a->compressedrow.rindex, c->compressedrow.nrows));
Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices->assign(c->compressedrow.rindex, c->compressedrow.rindex + c->compressedrow.nrows);
} else {
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Cmat->cprowIndices = NULL;
}
Ccusp->nrows = ciscompressed ? c->compressedrow.nrows : m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = Ccusp->nrows;
Ccsr->num_cols = n;
Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows + 1);
PetscCallCUSPARSE(cusparseCreateMatDescr(&Cmat->descr));
PetscCallCUSPARSE(cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
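  /* alpha_one / beta_zero / beta_one are kept in device memory because the cuSPARSE handle is used with
     CUSPARSE_POINTER_MODE_DEVICE for the SpGEMM calls below, which then expect device pointers for the scalars */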
PetscCallCUDA(cudaMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar)));
PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar)));
PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar)));
PetscCallCUDA(cudaMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
  if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cuSPARSE raises errors in different calls when matrices have zero rows/columns! */
PetscCallThrust(thrust::fill(thrust::device, Ccsr->row_offsets->begin(), Ccsr->row_offsets->end(), 0));
c->nz = 0;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
goto finalizesym;
}
PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]);
PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]);
Acsr = (CsrMatrix *)Amat->mat;
if (!biscompressed) {
Bcsr = (CsrMatrix *)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
BmatSpDescr = Bmat->matDescr;
#endif
} else { /* we need to use row offsets for the full matrix */
CsrMatrix *cBcsr = (CsrMatrix *)Bmat->mat;
Bcsr = new CsrMatrix;
Bcsr->num_rows = B->rmap->n;
Bcsr->num_cols = cBcsr->num_cols;
Bcsr->num_entries = cBcsr->num_entries;
Bcsr->column_indices = cBcsr->column_indices;
Bcsr->values = cBcsr->values;
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1);
PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt)));
}
Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
mmdata->Bcsr = Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
if (Bcsr->num_rows && Bcsr->num_cols) {
stat = cusparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Bcsr->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
}
BmatSpDescr = mmdata->matSpBDescr;
#endif
}
PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct");
PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct");
/* precompute flops count */
if (ptype == MATPRODUCT_AB) {
for (i = 0, flops = 0; i < A->rmap->n; i++) {
const PetscInt st = a->i[i];
const PetscInt en = a->i[i + 1];
for (j = st; j < en; j++) {
const PetscInt brow = a->j[j];
flops += 2. * (b->i[brow + 1] - b->i[brow]);
}
}
} else if (ptype == MATPRODUCT_AtB) {
for (i = 0, flops = 0; i < A->rmap->n; i++) {
const PetscInt anzi = a->i[i + 1] - a->i[i];
const PetscInt bnzi = b->i[i + 1] - b->i[i];
flops += (2. * anzi) * bnzi;
}
} else { /* TODO */
flops = 0.;
}
mmdata->flops = flops;
PetscCall(PetscLogGpuTimeBegin());
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE));
stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0, NULL, NULL, NULL, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
PetscCallCUSPARSE(cusparseSpGEMM_createDescr(&mmdata->spgemmDesc));
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
{
/* cusparseSpGEMMreuse has more reasonable APIs than cusparseSpGEMM, so we prefer to use it.
We follow the sample code at https://github.com/NVIDIA/CUDALibrarySamples/blob/master/cuSPARSE/spgemm_reuse
*/
void *dBuffer1 = NULL;
void *dBuffer2 = NULL;
void *dBuffer3 = NULL;
/* dBuffer4, dBuffer5 are needed by cusparseSpGEMMreuse_compute, and therefore are stored in mmdata */
size_t bufferSize1 = 0;
size_t bufferSize2 = 0;
size_t bufferSize3 = 0;
size_t bufferSize4 = 0;
size_t bufferSize5 = 0;
/* ask bufferSize1 bytes for external memory */
stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, NULL);
PetscCallCUSPARSE(stat);
PetscCallCUDA(cudaMalloc((void **)&dBuffer1, bufferSize1));
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, dBuffer1);
PetscCallCUSPARSE(stat);
stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, NULL, &bufferSize3, NULL, &bufferSize4, NULL);
PetscCallCUSPARSE(stat);
PetscCallCUDA(cudaMalloc((void **)&dBuffer2, bufferSize2));
PetscCallCUDA(cudaMalloc((void **)&dBuffer3, bufferSize3));
PetscCallCUDA(cudaMalloc((void **)&mmdata->dBuffer4, bufferSize4));
stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, dBuffer2, &bufferSize3, dBuffer3, &bufferSize4, mmdata->dBuffer4);
PetscCallCUSPARSE(stat);
PetscCallCUDA(cudaFree(dBuffer1));
PetscCallCUDA(cudaFree(dBuffer2));
/* get matrix C non-zero entries C_nnz1 */
PetscCallCUSPARSE(cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1));
c->nz = (PetscInt)C_nnz1;
/* allocate matrix C */
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
/* update matC with the new pointers */
stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get());
PetscCallCUSPARSE(stat);
stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, NULL);
PetscCallCUSPARSE(stat);
PetscCallCUDA(cudaMalloc((void **)&mmdata->dBuffer5, bufferSize5));
stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, mmdata->dBuffer5);
PetscCallCUSPARSE(stat);
PetscCallCUDA(cudaFree(dBuffer3));
stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
PetscCallCUSPARSE(stat);
PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufferSize4 / 1024, bufferSize5 / 1024));
}
#else
size_t bufSize2;
/* ask bufferSize bytes for external memory */
stat = cusparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, NULL);
PetscCallCUSPARSE(stat);
PetscCallCUDA(cudaMalloc((void **)&mmdata->mmBuffer2, bufSize2));
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = cusparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);
PetscCallCUSPARSE(stat);
  /* query again how many bytes of external memory are needed */
stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);
PetscCallCUSPARSE(stat);
  /* Neither the CUSPARSE documentation nor the API is clear here.
     We need both buffers to perform the operations properly!
     mmdata->mmBuffer2 does not appear anywhere in the compute/copy API;
     it only appears in the workEstimation call, but it seems to be needed in compute as well,
     so the address is probably stored in the descriptor! What a messy API... */
PetscCallCUDA(cudaMalloc((void **)&mmdata->mmBuffer, mmdata->mmBufferSize));
/* compute the intermediate product of A * B */
stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);
PetscCallCUSPARSE(stat);
/* get matrix C non-zero entries C_nnz1 */
PetscCallCUSPARSE(cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1));
c->nz = (PetscInt)C_nnz1;
PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufSize2 / 1024,
mmdata->mmBufferSize / 1024));
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get());
PetscCallCUSPARSE(stat);
stat = cusparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
PetscCallCUSPARSE(stat);
#endif // PETSC_PKG_CUDA_VERSION_GE(11,4,0)
#else
PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_HOST));
stat = cusparseXcsrgemmNnz(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries,
Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);
PetscCallCUSPARSE(stat);
c->nz = cnz;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE));
  /* With the old gemm interface (removed from CUDA 11.0 on) we cannot compute the symbolic factorization only.
     I have tried the gemm2 interface (alpha * A * B + beta * D), which allows the symbolic phase to be done by passing NULL for the values,
     but it seems quite buggy when D is NULL, despite the fact that the CUSPARSE documentation claims it is supported! */
stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries,
Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());
PetscCallCUSPARSE(stat);
#endif
PetscCall(PetscLogGpuFlops(mmdata->flops));
PetscCall(PetscLogGpuTimeEnd());
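  /* finalizesym: copy the symbolic CSR structure (row offsets and column indices) of C from the GPU to the host
     arrays and finish setting up the Mat_SeqAIJ data (ilen, imax, rmax, nonzerorowcnt, etc.) */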
finalizesym:
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
PetscCall(PetscMalloc1(m + 1, &c->i));
PetscCall(PetscMalloc1(c->nz, &c->j));
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
PetscInt *d_i = c->i;
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
if (ciscompressed) d_i = c->compressedrow.i;
PetscCallCUDA(cudaMemcpy(d_i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost));
PetscCallCUDA(cudaMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost));
} else {
PetscInt *d_i = c->i;
if (ciscompressed) d_i = c->compressedrow.i;
PetscCallCUDA(cudaMemcpy(d_i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost));
PetscCallCUDA(cudaMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost));
}
if (ciscompressed) { /* need to expand host row offsets */
PetscInt r = 0;
c->i[0] = 0;
for (k = 0; k < c->compressedrow.nrows; k++) {
const PetscInt next = c->compressedrow.rindex[k];
const PetscInt old = c->compressedrow.i[k];
for (; r < next; r++) c->i[r + 1] = old;
}
for (; r < m; r++) c->i[r + 1] = c->compressedrow.i[c->compressedrow.nrows];
}
PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt)));
PetscCall(PetscMalloc1(m, &c->ilen));
PetscCall(PetscMalloc1(m, &c->imax));
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (k = 0; k < m; k++) {
const PetscInt nn = c->i[k + 1] - c->i[k];
c->ilen[k] = c->imax[k] = nn;
c->nonzerorowcnt += (PetscInt) !!nn;
c->rmax = PetscMax(c->rmax, nn);
}
PetscCall(MatMarkDiagonal_SeqAIJ(C));
PetscCall(PetscMalloc1(c->nz, &c->a));
Ccsr->num_entries = c->nz;
C->nonzerostate++;
PetscCall(PetscLayoutSetUp(C->rmap));
PetscCall(PetscLayoutSetUp(C->cmap));
Ccusp->nonzerostate = C->nonzerostate;
C->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
C->preallocated = PETSC_TRUE;
C->assembled = PETSC_FALSE;
C->was_assembled = PETSC_FALSE;
if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */
mmdata->reusesym = PETSC_TRUE;
C->offloadmask = PETSC_OFFLOAD_GPU;
}
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);
/* handles sparse or dense B */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
Mat_Product *product = mat->product;
PetscBool isdense = PETSC_FALSE, Biscusp = PETSC_FALSE, Ciscusp = PETSC_TRUE;
PetscFunctionBegin;
MatCheckProduct(mat, 1);
PetscCall(PetscObjectBaseTypeCompare((PetscObject)product->B, MATSEQDENSE, &isdense));
if (!product->A->boundtocpu && !product->B->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->B, MATSEQAIJCUSPARSE, &Biscusp));
if (product->type == MATPRODUCT_ABC) {
Ciscusp = PETSC_FALSE;
if (!product->C->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->C, MATSEQAIJCUSPARSE, &Ciscusp));
}
if (Biscusp && Ciscusp) { /* we can always select the CPU backend */
PetscBool usecpu = PETSC_FALSE;
switch (product->type) {
case MATPRODUCT_AB:
if (product->api_user) {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMult", "Mat");
PetscCall(PetscOptionsBool("-matmatmult_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
} else {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AB", "Mat");
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
}
break;
case MATPRODUCT_AtB:
if (product->api_user) {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatTransposeMatMult", "Mat");
PetscCall(PetscOptionsBool("-mattransposematmult_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
} else {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AtB", "Mat");
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
}
break;
case MATPRODUCT_PtAP:
if (product->api_user) {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatPtAP", "Mat");
PetscCall(PetscOptionsBool("-matptap_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL));
PetscOptionsEnd();
} else {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_PtAP", "Mat");
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL));
PetscOptionsEnd();
}
break;
case MATPRODUCT_RARt:
if (product->api_user) {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatRARt", "Mat");
PetscCall(PetscOptionsBool("-matrart_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL));
PetscOptionsEnd();
} else {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_RARt", "Mat");
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL));
PetscOptionsEnd();
}
break;
case MATPRODUCT_ABC:
if (product->api_user) {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMatMult", "Mat");
PetscCall(PetscOptionsBool("-matmatmatmult_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
} else {
PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_ABC", "Mat");
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL));
PetscOptionsEnd();
}
break;
default:
break;
}
if (usecpu) Biscusp = Ciscusp = PETSC_FALSE;
}
/* dispatch */
if (isdense) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
if (product->A->boundtocpu) {
PetscCall(MatProductSetFromOptions_SeqAIJ_SeqDense(mat));
} else {
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
}
break;
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else if (Biscusp && Ciscusp) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
break;
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else { /* fallback for AIJ */
PetscCall(MatProductSetFromOptions_SeqAIJ(mat));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_FALSE, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_FALSE, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_TRUE));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_TRUE));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
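/* Simple CUDA kernel used when A is stored in compressed-row form: it adds the (short) work vector x into the
   full-length vector y at the row locations given by idx, i.e. y[idx[i]] += x[i] */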
__global__ static void ScatterAdd(PetscInt n, PetscInt *idx, const PetscScalar *x, PetscScalar *y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[idx[i]] += x[i];
}
/* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz, PetscBool trans, PetscBool herm)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct;
PetscScalar *xarray, *zarray, *dptr, *beta, *xptr;
cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
PetscBool compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscInt nx, ny;
#endif
PetscFunctionBegin;
  PetscCheck(!herm || trans, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Hermitian without transpose is not supported");
if (!a->nz) {
if (yy) PetscCall(VecSeq_CUDA::Copy(yy, zz));
else PetscCall(VecSeq_CUDA::Set(zz, 0));
PetscFunctionReturn(PETSC_SUCCESS);
}
/* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
if (!trans) {
matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat;
PetscCheck(matstruct, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
} else {
if (herm || !A->form_explicit_transpose) {
opA = herm ? CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE;
matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat;
} else {
if (!cusparsestruct->matTranspose) PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose;
}
}
/* Does the matrix use compressed rows (i.e., drop zero rows)? */
compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;
try {
PetscCall(VecCUDAGetArrayRead(xx, (const PetscScalar **)&xarray));
if (yy == zz) PetscCall(VecCUDAGetArray(zz, &zarray)); /* read & write zz, so need to get up-to-date zarray on GPU */
else PetscCall(VecCUDAGetArrayWrite(zz, &zarray)); /* write zz, so no need to init zarray on GPU */
PetscCall(PetscLogGpuTimeBegin());
if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
/* z = A x + beta y.
         If A is compressed (with fewer rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
*/
xptr = xarray;
dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
/* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is
allocated to accommodate different uses. So we get the length info directly from mat.
*/
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix *)matstruct->mat;
nx = mat->num_cols;
ny = mat->num_rows;
}
#endif
} else {
/* z = A^T x + beta y
If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
Note A^Tx is of full length, so we set beta to 1.0 if y exists.
*/
xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
dptr = zarray;
beta = yy ? matstruct->beta_one : matstruct->beta_zero;
if (compressed) { /* Scatter x to work vector */
thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
thrust::for_each(
#if PetscDefined(HAVE_THRUST_ASYNC)
thrust::cuda::par.on(PetscDefaultCudaStream),
#endif
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAEqualsReverse());
}
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix *)matstruct->mat;
nx = mat->num_rows;
ny = mat->num_cols;
}
#endif
}
/* csr_spmv does y = alpha op(A) x + beta y */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCheck(opA >= 0 && opA <= 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE ABI on cusparseOperation_t has changed and PETSc has not been updated accordingly");
if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
PetscCallCUSPARSE(cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr, nx, xptr, cusparse_scalartype));
PetscCallCUSPARSE(cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr, ny, dptr, cusparse_scalartype));
PetscCallCUSPARSE(
cusparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, &matstruct->cuSpMV[opA].spmvBufferSize));
PetscCallCUDA(cudaMalloc(&matstruct->cuSpMV[opA].spmvBuffer, matstruct->cuSpMV[opA].spmvBufferSize));
matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
} else {
/* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
PetscCallCUSPARSE(cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr, xptr));
PetscCallCUSPARSE(cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr, dptr));
}
PetscCallCUSPARSE(cusparseSpMV(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTranspose() */
matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, matstruct->cuSpMV[opA].spmvBuffer));
#else
CsrMatrix *mat = (CsrMatrix *)matstruct->mat;
PetscCallCUSPARSE(cusparse_csr_spmv(cusparsestruct->handle, opA, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha_one, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xptr, beta, dptr));
#endif
} else {
if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
PetscCallCUSPARSE(cusparse_hyb_spmv(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->descr, hybMat, xptr, beta, dptr));
#endif
}
}
PetscCall(PetscLogGpuTimeEnd());
if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
if (yy) { /* MatMultAdd: zz = A*xx + yy */
if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
PetscCall(VecSeq_CUDA::Copy(yy, zz)); /* zz = yy */
} else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */
}
} else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
PetscCall(VecSeq_CUDA::Set(zz, 0));
}
/* ScatterAdd the result from work vector into the full vector when A is compressed */
if (compressed) {
PetscCall(PetscLogGpuTimeBegin());
      /* I wanted to make this for_each asynchronous, but failed: thrust::async::for_each() returns an event (internally registered)
         and, in the destructor of its scope, it calls cudaStreamSynchronize() on this stream. One would have to store all the events to
         prevent that. So I just add a ScatterAdd kernel instead.
      */
#if 0
thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
thrust::async::for_each(thrust::cuda::par.on(cusparsestruct->stream),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAPlusEquals());
#else
PetscInt n = matstruct->cprowIndices->size();
ScatterAdd<<<(n + 255) / 256, 256, 0, PetscDefaultCudaStream>>>(n, matstruct->cprowIndices->data().get(), cusparsestruct->workVector->data().get(), zarray);
#endif
PetscCall(PetscLogGpuTimeEnd());
}
} else {
if (yy && yy != zz) PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */
}
PetscCall(VecCUDARestoreArrayRead(xx, (const PetscScalar **)&xarray));
if (yy == zz) PetscCall(VecCUDARestoreArray(zz, &zarray));
else PetscCall(VecCUDARestoreArrayWrite(zz, &zarray));
} catch (char *ex) {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
}
if (yy) {
PetscCall(PetscLogGpuFlops(2.0 * a->nz));
} else {
PetscCall(PetscLogGpuFlops(2.0 * a->nz - a->nonzerorowcnt));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz)
{
PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A, MatAssemblyType mode)
{
PetscObjectState onnz = A->nonzerostate;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscFunctionBegin;
PetscCall(MatAssemblyEnd_SeqAIJ(A, mode));
if (onnz != A->nonzerostate && cusp->deviceMat) {
PetscCall(PetscInfo(A, "Destroy device mat since nonzerostate changed\n"));
PetscCallCUDA(cudaFree(cusp->deviceMat));
cusp->deviceMat = NULL;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
  MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in `MATSEQAIJCUSPARSE` (compressed row) format.
  This matrix will ultimately be pushed down to NVIDIA GPUs and will use the cuSPARSE library for
  calculations. For good matrix assembly performance the user should preallocate the matrix storage
  by setting the parameter `nz` (or the array `nnz`).
Collective
Input Parameters:
+ comm - MPI communicator, set to `PETSC_COMM_SELF`
. m - number of rows
. n - number of columns
. nz - number of nonzeros per row (same for all rows), ignored if `nnz` is provided
- nnz - array containing the number of nonzeros in the various rows (possibly different for each row) or `NULL`
Output Parameter:
. A - the matrix
Level: intermediate
Notes:
It is recommended that one use the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`,
  MatXXXXSetPreallocation() paradigm instead of this routine directly.
[MatXXXXSetPreallocation() is, for example, `MatSeqAIJSetPreallocation()`]
The AIJ format, also called
compressed row storage, is fully compatible with standard Fortran
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero.
Specify the preallocated storage with either nz or nnz (not both).
Set `nz` = `PETSC_DEFAULT` and `nnz` = `NULL` for PETSc to control dynamic memory
allocation.
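  Example Usage:
  A minimal sketch (with arbitrary sizes) that creates the matrix, inserts diagonal entries, and assembles it
.vb
  Mat         A;
  PetscInt    i, m = 10;
  PetscScalar one = 1.0;
  PetscCall(MatCreateSeqAIJCUSPARSE(PETSC_COMM_SELF, m, m, 1, NULL, &A));
  for (i = 0; i < m; i++) PetscCall(MatSetValue(A, i, i, one, INSERT_VALUES));
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatDestroy(&A));
.ve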
.seealso: [](chapter_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `MatCreate()`, `MatCreateAIJ()`, `MatSetValues()`, `MatSeqAIJSetColumnIndices()`, `MatCreateSeqAIJWithArrays()`, `MatCreateAIJ()`, `MATSEQAIJCUSPARSE`, `MATAIJCUSPARSE`
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt nz, const PetscInt nnz[], Mat *A)
{
PetscFunctionBegin;
PetscCall(MatCreate(comm, A));
PetscCall(MatSetSizes(*A, m, n, m, n));
PetscCall(MatSetType(*A, MATSEQAIJCUSPARSE));
PetscCall(MatSeqAIJSetPreallocation_SeqAIJ(*A, nz, (PetscInt *)nnz));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
PetscCall(MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE **)&A->spptr));
} else {
PetscCall(MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors **)&A->spptr));
}
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetFormat_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetUseCPUSolve_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorGetSolverType_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatConvert_seqaijcusparse_hypre_C", NULL));
PetscCall(MatDestroy_SeqAIJ(A));
PetscFunctionReturn(PETSC_SUCCESS);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat, MatType, MatReuse, Mat *);
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat, PetscBool);
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A, MatDuplicateOption cpvalues, Mat *B)
{
PetscFunctionBegin;
PetscCall(MatDuplicate_SeqAIJ(A, cpvalues, B));
PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, B));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y, PetscScalar a, Mat X, MatStructure str)
{
Mat_SeqAIJ *x = (Mat_SeqAIJ *)X->data, *y = (Mat_SeqAIJ *)Y->data;
Mat_SeqAIJCUSPARSE *cy;
Mat_SeqAIJCUSPARSE *cx;
PetscScalar *ay;
const PetscScalar *ax;
CsrMatrix *csry, *csrx;
PetscFunctionBegin;
cy = (Mat_SeqAIJCUSPARSE *)Y->spptr;
cx = (Mat_SeqAIJCUSPARSE *)X->spptr;
if (X->ops->axpy != Y->ops->axpy) {
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE));
PetscCall(MatAXPY_SeqAIJ(Y, a, X, str));
PetscFunctionReturn(PETSC_SUCCESS);
}
/* if we are here, it means both matrices are bound to GPU */
PetscCall(MatSeqAIJCUSPARSECopyToGPU(Y));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(X));
PetscCheck(cy->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)Y), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported");
PetscCheck(cx->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)X), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported");
csry = (CsrMatrix *)cy->mat->mat;
csrx = (CsrMatrix *)cx->mat->mat;
/* see if we can turn this into a cublas axpy */
if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
bool eq = thrust::equal(thrust::device, csry->row_offsets->begin(), csry->row_offsets->end(), csrx->row_offsets->begin());
if (eq) eq = thrust::equal(thrust::device, csry->column_indices->begin(), csry->column_indices->end(), csrx->column_indices->begin());
if (eq) str = SAME_NONZERO_PATTERN;
}
/* spgeam is buggy with one column */
if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN;
if (str == SUBSET_NONZERO_PATTERN) {
PetscScalar b = 1.0;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
size_t bufferSize;
void *buffer;
#endif
PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax));
PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay));
PetscCallCUSPARSE(cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_HOST));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
PetscCallCUSPARSE(cusparse_csr_spgeam_bufferSize(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(),
csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), &bufferSize));
PetscCallCUDA(cudaMalloc(&buffer, bufferSize));
PetscCall(PetscLogGpuTimeBegin());
PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(),
csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), buffer));
PetscCall(PetscLogGpuFlops(x->nz + y->nz));
PetscCall(PetscLogGpuTimeEnd());
PetscCallCUDA(cudaFree(buffer));
#else
PetscCall(PetscLogGpuTimeBegin());
PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(),
csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get()));
PetscCall(PetscLogGpuFlops(x->nz + y->nz));
PetscCall(PetscLogGpuTimeEnd());
#endif
PetscCallCUSPARSE(cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_DEVICE));
PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax));
PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay));
PetscCall(MatSeqAIJInvalidateDiagonal(Y));
} else if (str == SAME_NONZERO_PATTERN) {
cublasHandle_t cublasv2handle;
PetscBLASInt one = 1, bnz = 1;
PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax));
PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay));
PetscCall(PetscCUBLASGetHandle(&cublasv2handle));
PetscCall(PetscBLASIntCast(x->nz, &bnz));
PetscCall(PetscLogGpuTimeBegin());
PetscCallCUBLAS(cublasXaxpy(cublasv2handle, bnz, &a, ax, one, ay, one));
PetscCall(PetscLogGpuFlops(2.0 * bnz));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax));
PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay));
PetscCall(MatSeqAIJInvalidateDiagonal(Y));
} else {
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE));
PetscCall(MatAXPY_SeqAIJ(Y, a, X, str));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y, PetscScalar a)
{
Mat_SeqAIJ *y = (Mat_SeqAIJ *)Y->data;
PetscScalar *ay;
cublasHandle_t cublasv2handle;
PetscBLASInt one = 1, bnz = 1;
PetscFunctionBegin;
PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay));
PetscCall(PetscCUBLASGetHandle(&cublasv2handle));
PetscCall(PetscBLASIntCast(y->nz, &bnz));
PetscCall(PetscLogGpuTimeBegin());
PetscCallCUBLAS(cublasXscal(cublasv2handle, bnz, &a, ay, one));
PetscCall(PetscLogGpuFlops(bnz));
PetscCall(PetscLogGpuTimeEnd());
PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay));
PetscCall(MatSeqAIJInvalidateDiagonal(Y));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
PetscBool both = PETSC_FALSE;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE *)A->spptr;
if (spptr->mat) {
CsrMatrix *matrix = (CsrMatrix *)spptr->mat->mat;
if (matrix->values) {
both = PETSC_TRUE;
thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.);
}
}
if (spptr->matTranspose) {
CsrMatrix *matrix = (CsrMatrix *)spptr->matTranspose->mat;
if (matrix->values) thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.);
}
}
PetscCall(PetscArrayzero(a->a, a->i[A->rmap->n]));
PetscCall(MatSeqAIJInvalidateDiagonal(A));
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
else A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(PETSC_SUCCESS);
}
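/* Switch the matrix operation tables between the CPU (SeqAIJ) and GPU (SeqAIJCUSPARSE) implementations and
   (un)compose the GPU-only methods (COO assembly, sub-array copy, MatProduct dispatch) accordingly */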
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A, PetscBool flg)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscFunctionBegin;
if (A->factortype != MAT_FACTOR_NONE) {
A->boundtocpu = flg;
PetscFunctionReturn(PETSC_SUCCESS);
}
if (flg) {
PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
A->ops->scale = MatScale_SeqAIJ;
A->ops->axpy = MatAXPY_SeqAIJ;
A->ops->zeroentries = MatZeroEntries_SeqAIJ;
A->ops->mult = MatMult_SeqAIJ;
A->ops->multadd = MatMultAdd_SeqAIJ;
A->ops->multtranspose = MatMultTranspose_SeqAIJ;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ;
A->ops->multhermitiantranspose = NULL;
A->ops->multhermitiantransposeadd = NULL;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ;
PetscCall(PetscMemzero(a->ops, sizeof(Mat_SeqAIJOps)));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL));
} else {
A->ops->scale = MatScale_SeqAIJCUSPARSE;
A->ops->axpy = MatAXPY_SeqAIJCUSPARSE;
A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE;
A->ops->mult = MatMult_SeqAIJCUSPARSE;
A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE;
A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;
A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE;
A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE;
a->ops->getarray = MatSeqAIJGetArray_SeqAIJCUSPARSE;
a->ops->restorearray = MatSeqAIJRestoreArray_SeqAIJCUSPARSE;
a->ops->getarrayread = MatSeqAIJGetArrayRead_SeqAIJCUSPARSE;
a->ops->restorearrayread = MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE;
a->ops->getarraywrite = MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE;
a->ops->restorearraywrite = MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE;
a->ops->getcsrandmemtype = MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE;
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", MatSeqAIJCopySubArray_SeqAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", MatProductSetFromOptions_SeqAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", MatProductSetFromOptions_SeqAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", MatSetPreallocationCOO_SeqAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", MatSetValuesCOO_SeqAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", MatProductSetFromOptions_SeqAIJCUSPARSE));
}
A->boundtocpu = flg;
if (flg && a->inode.size) {
a->inode.use = PETSC_TRUE;
} else {
a->inode.use = PETSC_FALSE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType, MatReuse reuse, Mat *newmat)
{
Mat B;
PetscFunctionBegin;
PetscCall(PetscDeviceInitialize(PETSC_DEVICE_CUDA)); /* first use of CUSPARSE may be via MatConvert */
if (reuse == MAT_INITIAL_MATRIX) {
PetscCall(MatDuplicate(A, MAT_COPY_VALUES, newmat));
} else if (reuse == MAT_REUSE_MATRIX) {
PetscCall(MatCopy(A, *newmat, SAME_NONZERO_PATTERN));
}
B = *newmat;
PetscCall(PetscFree(B->defaultvectype));
PetscCall(PetscStrallocpy(VECCUDA, &B->defaultvectype));
if (reuse != MAT_REUSE_MATRIX && !B->spptr) {
if (B->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr;
PetscCall(PetscNew(&spptr));
PetscCallCUSPARSE(cusparseCreate(&spptr->handle));
PetscCallCUSPARSE(cusparseSetStream(spptr->handle, PetscDefaultCudaStream));
spptr->format = MAT_CUSPARSE_CSR;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
spptr->spmvAlg = CUSPARSE_SPMV_CSR_ALG1; /* default, since we only support csr */
#else
spptr->spmvAlg = CUSPARSE_CSRMV_ALG1; /* default, since we only support csr */
#endif
spptr->spmmAlg = CUSPARSE_SPMM_CSR_ALG1; /* default, only support column-major dense matrix B */
spptr->csr2cscAlg = CUSPARSE_CSR2CSC_ALG1;
#endif
B->spptr = spptr;
} else {
Mat_SeqAIJCUSPARSETriFactors *spptr;
PetscCall(PetscNew(&spptr));
PetscCallCUSPARSE(cusparseCreate(&spptr->handle));
PetscCallCUSPARSE(cusparseSetStream(spptr->handle, PetscDefaultCudaStream));
B->spptr = spptr;
}
B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE;
B->ops->destroy = MatDestroy_SeqAIJCUSPARSE;
B->ops->setoption = MatSetOption_SeqAIJCUSPARSE;
B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE;
B->ops->bindtocpu = MatBindToCPU_SeqAIJCUSPARSE;
B->ops->duplicate = MatDuplicate_SeqAIJCUSPARSE;
PetscCall(MatBindToCPU_SeqAIJCUSPARSE(B, PETSC_FALSE));
PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATSEQAIJCUSPARSE));
PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE));
#if defined(PETSC_HAVE_HYPRE)
PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_seqaijcusparse_hypre_C", MatConvert_AIJ_HYPRE));
#endif
PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetUseCPUSolve_C", MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
PetscFunctionBegin;
PetscCall(MatCreate_SeqAIJ(B));
PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, &B));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*MC
MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.
  A matrix type whose data resides on NVIDIA GPUs. These matrices can be in CSR, ELL, or Hybrid format.
All matrix calculations are performed on NVIDIA GPUs using the CuSPARSE library.
Options Database Keys:
+ -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to `MatSetFromOptions()`
. -mat_cusparse_storage_format csr - sets the storage format of matrices (for `MatMult()` and factors in `MatSolve()`).
Other options include ell (ellpack) or hyb (hybrid).
. -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for `MatMult()`). Other options include ell (ellpack) or hyb (hybrid).
- -mat_cusparse_use_cpu_solve - Do `MatSolve()` on CPU
Level: beginner
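  Example Usage:
  A minimal sketch of selecting this type programmatically (A, m, n, nz are placeholders); the same effect is
  obtained at the command line with -mat_type aijcusparse together with `MatSetFromOptions()`
.vb
  PetscCall(MatCreate(PETSC_COMM_SELF, &A));
  PetscCall(MatSetSizes(A, m, n, m, n));
  PetscCall(MatSetType(A, MATSEQAIJCUSPARSE));
  PetscCall(MatSeqAIJSetPreallocation(A, nz, NULL));
.ve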
.seealso: [](chapter_matrices), `Mat`, `MatCreateSeqAIJCUSPARSE()`, `MatCUSPARSESetUseCPUSolve()`, `MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation`
M*/
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat, MatFactorType, Mat *);
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
PetscFunctionBegin;
PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSEBAND, MATSEQAIJ, MAT_FACTOR_LU, MatGetFactor_seqaijcusparse_cusparse_band));
PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_LU, MatGetFactor_seqaijcusparse_cusparse));
PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_CHOLESKY, MatGetFactor_seqaijcusparse_cusparse));
PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ILU, MatGetFactor_seqaijcusparse_cusparse));
PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ICC, MatGetFactor_seqaijcusparse_cusparse));
PetscFunctionReturn(PETSC_SUCCESS);
}
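/* At run time these factorizations are typically selected through the usual PETSc solver-type mechanism,
   e.g. -pc_factor_mat_solver_type cusparse (that option is handled elsewhere in PETSc, not in this file) */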
static PetscErrorCode MatResetPreallocationCOO_SeqAIJCUSPARSE(Mat mat)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)mat->spptr;
PetscFunctionBegin;
if (!cusp) PetscFunctionReturn(PETSC_SUCCESS);
delete cusp->cooPerm;
delete cusp->cooPerm_a;
cusp->cooPerm = NULL;
cusp->cooPerm_a = NULL;
if (cusp->use_extended_coo) {
PetscCallCUDA(cudaFree(cusp->jmap_d));
PetscCallCUDA(cudaFree(cusp->perm_d));
}
cusp->use_extended_coo = PETSC_FALSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
PetscFunctionBegin;
if (*cusparsestruct) {
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat, (*cusparsestruct)->format));
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose, (*cusparsestruct)->format));
delete (*cusparsestruct)->workVector;
delete (*cusparsestruct)->rowoffsets_gpu;
delete (*cusparsestruct)->cooPerm;
delete (*cusparsestruct)->cooPerm_a;
delete (*cusparsestruct)->csr2csc_i;
if ((*cusparsestruct)->handle) PetscCallCUSPARSE(cusparseDestroy((*cusparsestruct)->handle));
if ((*cusparsestruct)->jmap_d) PetscCallCUDA(cudaFree((*cusparsestruct)->jmap_d));
if ((*cusparsestruct)->perm_d) PetscCallCUDA(cudaFree((*cusparsestruct)->perm_d));
PetscCall(PetscFree(*cusparsestruct));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
PetscFunctionBegin;
if (*mat) {
delete (*mat)->values;
delete (*mat)->column_indices;
delete (*mat)->row_offsets;
delete *mat;
*mat = 0;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
PetscFunctionBegin;
if (*trifactor) {
if ((*trifactor)->descr) PetscCallCUSPARSE(cusparseDestroyMatDescr((*trifactor)->descr));
if ((*trifactor)->solveInfo) PetscCallCUSPARSE(cusparseDestroyCsrsvInfo((*trifactor)->solveInfo));
PetscCall(CsrMatrix_Destroy(&(*trifactor)->csrMat));
if ((*trifactor)->solveBuffer) PetscCallCUDA(cudaFree((*trifactor)->solveBuffer));
if ((*trifactor)->AA_h) PetscCallCUDA(cudaFreeHost((*trifactor)->AA_h));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
if ((*trifactor)->csr2cscBuffer) PetscCallCUDA(cudaFree((*trifactor)->csr2cscBuffer));
#endif
PetscCall(PetscFree(*trifactor));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct, MatCUSPARSEStorageFormat format)
{
CsrMatrix *mat;
PetscFunctionBegin;
if (*matstruct) {
if ((*matstruct)->mat) {
if (format == MAT_CUSPARSE_ELL || format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
PetscCallCUSPARSE(cusparseDestroyHybMat(hybMat));
#endif
} else {
mat = (CsrMatrix *)(*matstruct)->mat;
PetscCall(CsrMatrix_Destroy(&mat));
}
}
if ((*matstruct)->descr) PetscCallCUSPARSE(cusparseDestroyMatDescr((*matstruct)->descr));
delete (*matstruct)->cprowIndices;
if ((*matstruct)->alpha_one) PetscCallCUDA(cudaFree((*matstruct)->alpha_one));
if ((*matstruct)->beta_zero) PetscCallCUDA(cudaFree((*matstruct)->beta_zero));
if ((*matstruct)->beta_one) PetscCallCUDA(cudaFree((*matstruct)->beta_one));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
if (mdata->matDescr) PetscCallCUSPARSE(cusparseDestroySpMat(mdata->matDescr));
for (int i = 0; i < 3; i++) {
if (mdata->cuSpMV[i].initialized) {
PetscCallCUDA(cudaFree(mdata->cuSpMV[i].spmvBuffer));
PetscCallCUSPARSE(cusparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr));
PetscCallCUSPARSE(cusparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr));
}
}
#endif
delete *matstruct;
*matstruct = NULL;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *trifactors)
{
Mat_SeqAIJCUSPARSETriFactors *fs = *trifactors;
PetscFunctionBegin;
if (fs) {
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtr));
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtr));
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtrTranspose));
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtrTranspose));
delete fs->workVector;
fs->workVector = NULL;
#endif
delete fs->rpermIndices;
delete fs->cpermIndices;
fs->rpermIndices = NULL;
fs->cpermIndices = NULL;
if (fs->a_band_d) PetscCallCUDA(cudaFree(fs->a_band_d));
if (fs->i_band_d) PetscCallCUDA(cudaFree(fs->i_band_d));
fs->init_dev_prop = PETSC_FALSE;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
PetscCallCUDA(cudaFree(fs->csrRowPtr));
PetscCallCUDA(cudaFree(fs->csrColIdx));
PetscCallCUDA(cudaFree(fs->csrRowPtr32));
PetscCallCUDA(cudaFree(fs->csrColIdx32));
PetscCallCUDA(cudaFree(fs->csrVal));
PetscCallCUDA(cudaFree(fs->diag));
PetscCallCUDA(cudaFree(fs->X));
PetscCallCUDA(cudaFree(fs->Y));
// PetscCallCUDA(cudaFree(fs->factBuffer_M)); /* Not needed since factBuffer_M shares storage with one of spsvBuffer_L/U */
PetscCallCUDA(cudaFree(fs->spsvBuffer_L));
PetscCallCUDA(cudaFree(fs->spsvBuffer_U));
PetscCallCUDA(cudaFree(fs->spsvBuffer_Lt));
PetscCallCUDA(cudaFree(fs->spsvBuffer_Ut));
PetscCallCUSPARSE(cusparseDestroyMatDescr(fs->matDescr_M));
PetscCallCUSPARSE(cusparseDestroySpMat(fs->spMatDescr_L));
PetscCallCUSPARSE(cusparseDestroySpMat(fs->spMatDescr_U));
PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_L));
PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Lt));
PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_U));
PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Ut));
PetscCallCUSPARSE(cusparseDestroyDnVec(fs->dnVecDescr_X));
PetscCallCUSPARSE(cusparseDestroyDnVec(fs->dnVecDescr_Y));
PetscCallCUSPARSE(cusparseDestroyCsrilu02Info(fs->ilu0Info_M));
PetscCallCUSPARSE(cusparseDestroyCsric02Info(fs->ic0Info_M));
PetscCall(PetscFree(fs->csrRowPtr_h));
PetscCall(PetscFree(fs->csrVal_h));
PetscCall(PetscFree(fs->diag_h));
fs->createdTransposeSpSVDescr = PETSC_FALSE;
fs->updatedTransposeSpSVAnalysis = PETSC_FALSE;
#endif
}
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **trifactors)
{
PetscFunctionBegin;
if (*trifactors) {
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(trifactors));
PetscCallCUSPARSE(cusparseDestroy((*trifactors)->handle));
PetscCall(PetscFree(*trifactors));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
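/* Thrust functors used by the COO assembly code below: IJCompare orders (i,j) pairs
   lexicographically (by row, then by column), IJEqual detects duplicate entries, and
   IJDiff/IJSum are combined to build the map from sorted COO entries to unique nonzeros. */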
struct IJCompare {
__host__ __device__ inline bool operator()(const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct IJEqual {
__host__ __device__ inline bool operator()(const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false;
return true;
}
};
struct IJDiff {
__host__ __device__ inline PetscInt operator()(const PetscInt &t1, const PetscInt &t2) { return t1 == t2 ? 0 : 1; }
};
struct IJSum {
__host__ __device__ inline PetscInt operator()(const PetscInt &t1, const PetscInt &t2) { return t1 || t2; }
};
#include <thrust/iterator/discard_iterator.h>
/* Associated with MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic() */
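/* Scatters the user-provided COO values v[] into the CSR value array using the permutation
   computed at preallocation time; repeated (i,j) entries are reduced with thrust::reduce_by_key
   when cooPerm_a is present, and a NULL v[] with INSERT_VALUES simply zeros the matrix. */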
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE_Basic(Mat A, const PetscScalar v[], InsertMode imode)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
THRUSTARRAY *cooPerm_v = NULL;
thrust::device_ptr<const PetscScalar> d_v;
CsrMatrix *matrix;
PetscInt n;
PetscFunctionBegin;
PetscCheck(cusp, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUSPARSE struct");
PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUSPARSE CsrMatrix");
if (!cusp->cooPerm) {
PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
PetscFunctionReturn(PETSC_SUCCESS);
}
matrix = (CsrMatrix *)cusp->mat->mat;
PetscCheck(matrix->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
if (!v) {
if (imode == INSERT_VALUES) thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.);
goto finalize;
}
n = cusp->cooPerm->size();
if (isCudaMem(v)) {
d_v = thrust::device_pointer_cast(v);
} else {
cooPerm_v = new THRUSTARRAY(n);
cooPerm_v->assign(v, v + n);
d_v = cooPerm_v->data();
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscScalar)));
}
PetscCall(PetscLogGpuTimeBegin());
if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */
if (cusp->cooPerm_a) { /* there are repeated entries in d_v[], and we need to sum them up */
THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size());
auto vbit = thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin());
/* thrust::reduce_by_key(keys_first,keys_last,values_first,keys_output,values_output)
cooPerm_a = [0,0,1,2,3,4]. The length is n, the number of nonzeros in d_v[].
cooPerm_a is ordered. d_v[i] is the cooPerm_a[i]-th unique nonzero.
*/
thrust::reduce_by_key(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), vbit, thrust::make_discard_iterator(), cooPerm_w->begin(), thrust::equal_to<PetscInt>(), thrust::plus<PetscScalar>());
thrust::transform(cooPerm_w->begin(), cooPerm_w->end(), matrix->values->begin(), matrix->values->begin(), thrust::plus<PetscScalar>());
delete cooPerm_w;
} else {
/* all nonzeros in d_v[] are unique entries */
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()), matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->end()), matrix->values->end()));
thrust::for_each(zibit, zieit, VecCUDAPlusEquals()); /* values[i] += d_v[cooPerm[i]] */
}
} else {
if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */
auto vbit = thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin());
thrust::reduce_by_key(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), vbit, thrust::make_discard_iterator(), matrix->values->begin(), thrust::equal_to<PetscInt>(), thrust::plus<PetscScalar>());
} else {
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()), matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->end()), matrix->values->end()));
thrust::for_each(zibit, zieit, VecCUDAEquals());
}
}
PetscCall(PetscLogGpuTimeEnd());
finalize:
delete cooPerm_v;
A->offloadmask = PETSC_OFFLOAD_GPU;
PetscCall(PetscObjectStateIncrease((PetscObject)A));
/* shorter version of MatAssemblyEnd_SeqAIJ */
PetscCall(PetscInfo(A, "Matrix size: %" PetscInt_FMT " X %" PetscInt_FMT "; storage space: 0 unneeded,%" PetscInt_FMT " used\n", A->rmap->n, A->cmap->n, a->nz));
PetscCall(PetscInfo(A, "Number of mallocs during MatSetValues() is 0\n"));
PetscCall(PetscInfo(A, "Maximum nonzeros in any row is %" PetscInt_FMT "\n", a->rmax));
a->reallocs = 0;
A->info.mallocs += 0;
A->info.nz_unneeded = 0;
A->assembled = A->was_assembled = PETSC_TRUE;
A->num_ass++;
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscFunctionBegin;
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
if (!cusp) PetscFunctionReturn(PETSC_SUCCESS);
if (destroy) {
PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose, cusp->format));
delete cusp->csr2csc_i;
cusp->csr2csc_i = NULL;
}
A->transupdated = PETSC_FALSE;
PetscFunctionReturn(PETSC_SUCCESS);
}
#include <thrust/binary_search.h>
/* 'Basic' means it only works when coo_i[] and coo_j[] do not contain negative indices */
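/* Outline: copy coo_i[]/coo_j[] to the device if needed, sort the (i,j) pairs while recording
   the permutation in cooPerm, remove duplicates to obtain the nonzero pattern, keep cooPerm_a
   (the map from sorted entries to unique nonzeros) only when duplicates exist, and finally
   derive the host CSR structure (a->i, a->j) via a binary search over the sorted row indices. */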
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic(Mat A, PetscCount n, PetscInt coo_i[], PetscInt coo_j[])
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscInt cooPerm_n, nzr = 0;
PetscFunctionBegin;
PetscCall(PetscLayoutSetUp(A->rmap));
PetscCall(PetscLayoutSetUp(A->cmap));
cooPerm_n = cusp->cooPerm ? cusp->cooPerm->size() : 0;
if (n != cooPerm_n) {
delete cusp->cooPerm;
delete cusp->cooPerm_a;
cusp->cooPerm = NULL;
cusp->cooPerm_a = NULL;
}
if (n) {
thrust::device_ptr<PetscInt> d_i, d_j;
PetscInt *d_raw_i, *d_raw_j;
PetscBool free_raw_i = PETSC_FALSE, free_raw_j = PETSC_FALSE;
PetscMemType imtype, jmtype;
PetscCall(PetscGetMemType(coo_i, &imtype));
if (PetscMemTypeHost(imtype)) {
PetscCallCUDA(cudaMalloc(&d_raw_i, sizeof(PetscInt) * n));
PetscCallCUDA(cudaMemcpy(d_raw_i, coo_i, sizeof(PetscInt) * n, cudaMemcpyHostToDevice));
d_i = thrust::device_pointer_cast(d_raw_i);
free_raw_i = PETSC_TRUE;
PetscCall(PetscLogCpuToGpu(1. * n * sizeof(PetscInt)));
} else {
d_i = thrust::device_pointer_cast(coo_i);
}
PetscCall(PetscGetMemType(coo_j, &jmtype));
if (PetscMemTypeHost(jmtype)) { // MatSetPreallocationCOO_MPIAIJCUSPARSE_Basic() passes device coo_i[] and host coo_j[]!
PetscCallCUDA(cudaMalloc(&d_raw_j, sizeof(PetscInt) * n));
PetscCallCUDA(cudaMemcpy(d_raw_j, coo_j, sizeof(PetscInt) * n, cudaMemcpyHostToDevice));
d_j = thrust::device_pointer_cast(d_raw_j);
free_raw_j = PETSC_TRUE;
PetscCall(PetscLogCpuToGpu(1. * n * sizeof(PetscInt)));
} else {
d_j = thrust::device_pointer_cast(coo_j);
}
THRUSTINTARRAY ii(A->rmap->n);
if (!cusp->cooPerm) cusp->cooPerm = new THRUSTINTARRAY(n);
if (!cusp->cooPerm_a) cusp->cooPerm_a = new THRUSTINTARRAY(n);
/* Ex.
n = 6
coo_i = [3,3,1,4,1,4]
coo_j = [3,2,2,5,2,6]
*/
auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i, d_j));
auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i + n, d_j + n));
PetscCall(PetscLogGpuTimeBegin());
thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0);
thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare()); /* sort by row, then by col */
(*cusp->cooPerm_a).assign(d_i, d_i + n); /* copy the sorted array */
THRUSTINTARRAY w(d_j, d_j + n);
/*
d_i = [1,1,3,3,4,4]
d_j = [2,2,2,3,5,6]
cooPerm = [2,4,1,0,3,5]
*/
auto nekey = thrust::unique(fkey, ekey, IJEqual()); /* unique (d_i, d_j) */
/*
d_i = [1,3,3,4,4,x]
^ekey
d_j = [2,2,3,5,6,x]
^nekey
*/
if (nekey == ekey) { /* all entries are unique */
delete cusp->cooPerm_a;
cusp->cooPerm_a = NULL;
} else { /* Stefano: I couldn't come up with a more elegant algorithm */
/* idea: any change in i or j in the (i,j) sequence implies a new nonzero */
adjacent_difference(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), cusp->cooPerm_a->begin(), IJDiff()); /* cooPerm_a: [1,1,3,3,4,4] => [1,0,1,0,1,0]*/
adjacent_difference(w.begin(), w.end(), w.begin(), IJDiff()); /* w: [2,2,2,3,5,6] => [2,0,0,1,1,1]*/
(*cusp->cooPerm_a)[0] = 0; /* clear the first entry, though accessing an entry on device implies a cudaMemcpy */
w[0] = 0;
thrust::transform(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), w.begin(), cusp->cooPerm_a->begin(), IJSum()); /* cooPerm_a = [0,0,1,1,1,1]*/
thrust::inclusive_scan(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), cusp->cooPerm_a->begin(), thrust::plus<PetscInt>()); /*cooPerm_a=[0,0,1,2,3,4]*/
}
thrust::counting_iterator<PetscInt> search_begin(0);
thrust::upper_bound(d_i, nekey.get_iterator_tuple().get<0>(), /* binary search entries of [0,1,2,3,4,5,6) in ordered array d_i = [1,3,3,4,4], supposing A->rmap->n = 6. */
search_begin, search_begin + A->rmap->n, /* return in ii[] the index of last position in d_i[] where value could be inserted without violating the ordering */
ii.begin()); /* ii = [0,1,1,3,5,5]. A leading 0 will be added later */
PetscCall(PetscLogGpuTimeEnd());
PetscCall(MatSeqXAIJFreeAIJ(A, &a->a, &a->j, &a->i));
a->singlemalloc = PETSC_FALSE;
a->free_a = PETSC_TRUE;
a->free_ij = PETSC_TRUE;
PetscCall(PetscMalloc1(A->rmap->n + 1, &a->i));
a->i[0] = 0; /* a->i = [0,0,1,1,3,5,5] */
PetscCallCUDA(cudaMemcpy(a->i + 1, ii.data().get(), A->rmap->n * sizeof(PetscInt), cudaMemcpyDeviceToHost));
a->nz = a->maxnz = a->i[A->rmap->n];
a->rmax = 0;
PetscCall(PetscMalloc1(a->nz, &a->a));
PetscCall(PetscMalloc1(a->nz, &a->j));
PetscCallCUDA(cudaMemcpy(a->j, thrust::raw_pointer_cast(d_j), a->nz * sizeof(PetscInt), cudaMemcpyDeviceToHost));
if (!a->ilen) PetscCall(PetscMalloc1(A->rmap->n, &a->ilen));
if (!a->imax) PetscCall(PetscMalloc1(A->rmap->n, &a->imax));
for (PetscInt i = 0; i < A->rmap->n; i++) {
const PetscInt nnzr = a->i[i + 1] - a->i[i];
nzr += (PetscInt) !!(nnzr);
a->ilen[i] = a->imax[i] = nnzr;
a->rmax = PetscMax(a->rmax, nnzr);
}
a->nonzerorowcnt = nzr;
A->preallocated = PETSC_TRUE;
PetscCall(PetscLogGpuToCpu((A->rmap->n + a->nz) * sizeof(PetscInt)));
PetscCall(MatMarkDiagonal_SeqAIJ(A));
if (free_raw_i) PetscCallCUDA(cudaFree(d_raw_i));
if (free_raw_j) PetscCallCUDA(cudaFree(d_raw_j));
} else {
PetscCall(MatSeqAIJSetPreallocation(A, 0, NULL));
}
PetscCall(MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE));
/* We want to allocate the CUSPARSE struct for matvec now.
The code is so convoluted now that I prefer to copy zeros */
PetscCall(PetscArrayzero(a->a, a->nz));
PetscCall(MatCheckCompressedRow(A, nzr, &a->compressedrow, a->i, A->rmap->n, 0.6));
A->offloadmask = PETSC_OFFLOAD_CPU;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE));
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat mat, PetscCount coo_n, PetscInt coo_i[], PetscInt coo_j[])
{
Mat_SeqAIJ *seq;
Mat_SeqAIJCUSPARSE *dev;
PetscBool coo_basic = PETSC_TRUE;
PetscMemType mtype = PETSC_MEMTYPE_DEVICE;
PetscFunctionBegin;
PetscCall(MatResetPreallocationCOO_SeqAIJ(mat));
PetscCall(MatResetPreallocationCOO_SeqAIJCUSPARSE(mat));
if (coo_i) {
PetscCall(PetscGetMemType(coo_i, &mtype));
if (PetscMemTypeHost(mtype)) {
for (PetscCount k = 0; k < coo_n; k++) {
if (coo_i[k] < 0 || coo_j[k] < 0) {
coo_basic = PETSC_FALSE;
break;
}
}
}
}
if (coo_basic) { /* i,j are on device or do not contain negative indices */
PetscCall(MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic(mat, coo_n, coo_i, coo_j));
} else {
PetscCall(MatSetPreallocationCOO_SeqAIJ(mat, coo_n, coo_i, coo_j));
mat->offloadmask = PETSC_OFFLOAD_CPU;
PetscCall(MatSeqAIJCUSPARSECopyToGPU(mat));
seq = static_cast<Mat_SeqAIJ *>(mat->data);
dev = static_cast<Mat_SeqAIJCUSPARSE *>(mat->spptr);
PetscCallCUDA(cudaMalloc((void **)&dev->jmap_d, (seq->nz + 1) * sizeof(PetscCount)));
PetscCallCUDA(cudaMemcpy(dev->jmap_d, seq->jmap, (seq->nz + 1) * sizeof(PetscCount), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMalloc((void **)&dev->perm_d, seq->Atot * sizeof(PetscCount)));
PetscCallCUDA(cudaMemcpy(dev->perm_d, seq->perm, seq->Atot * sizeof(PetscCount), cudaMemcpyHostToDevice));
dev->use_extended_coo = PETSC_TRUE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
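/* Kernel used by the extended COO path: each thread walks the nonzeros in a grid-stride loop,
   summing all repeated COO contributions kv[perm[k]] for k in [jmap[i], jmap[i+1]) and either
   overwriting (INSERT_VALUES) or accumulating into (ADD_VALUES) the CSR value a[i]. */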
__global__ static void MatAddCOOValues(const PetscScalar kv[], PetscCount nnz, const PetscCount jmap[], const PetscCount perm[], InsertMode imode, PetscScalar a[])
{
PetscCount i = blockIdx.x * blockDim.x + threadIdx.x;
const PetscCount grid_size = gridDim.x * blockDim.x;
for (; i < nnz; i += grid_size) {
PetscScalar sum = 0.0;
for (PetscCount k = jmap[i]; k < jmap[i + 1]; k++) sum += kv[perm[k]];
a[i] = (imode == INSERT_VALUES ? 0.0 : a[i]) + sum;
}
}
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
Mat_SeqAIJ *seq = (Mat_SeqAIJ *)A->data;
Mat_SeqAIJCUSPARSE *dev = (Mat_SeqAIJCUSPARSE *)A->spptr;
PetscCount Annz = seq->nz;
PetscMemType memtype;
const PetscScalar *v1 = v;
PetscScalar *Aa;
PetscFunctionBegin;
if (dev->use_extended_coo) {
PetscCall(PetscGetMemType(v, &memtype));
if (PetscMemTypeHost(memtype)) { /* If the user passed v[] on the host, copy it to the device */
PetscCallCUDA(cudaMalloc((void **)&v1, seq->coo_n * sizeof(PetscScalar)));
PetscCallCUDA(cudaMemcpy((void *)v1, v, seq->coo_n * sizeof(PetscScalar), cudaMemcpyHostToDevice));
}
if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSEGetArrayWrite(A, &Aa));
else PetscCall(MatSeqAIJCUSPARSEGetArray(A, &Aa));
if (Annz) {
MatAddCOOValues<<<(Annz + 255) / 256, 256>>>(v1, Annz, dev->jmap_d, dev->perm_d, imode, Aa);
PetscCallCUDA(cudaPeekAtLastError());
}
if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSERestoreArrayWrite(A, &Aa));
else PetscCall(MatSeqAIJCUSPARSERestoreArray(A, &Aa));
if (PetscMemTypeHost(memtype)) PetscCallCUDA(cudaFree((void *)v1));
} else {
PetscCall(MatSetValuesCOO_SeqAIJCUSPARSE_Basic(A, v, imode));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSEGetIJ - returns the device row storage `i` and `j` indices for `MATSEQAIJCUSPARSE` matrices.
Not Collective
Input Parameters:
+ A - the matrix
- compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating whether the matrix data structure should always be returned in compressed form
Output Parameters:
+ i - the CSR row pointers
- j - the CSR column indices
Level: developer
Note:
When compressed is true, the CSR structure does not contain empty rows
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSERestoreIJ()`, `MatSeqAIJCUSPARSEGetArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetIJ(Mat A, PetscBool compressed, const int **i, const int **j)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *csr;
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
if (!i || !j) PetscFunctionReturn(PETSC_SUCCESS);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix *)cusp->mat->mat;
if (i) {
if (!compressed && a->compressedrow.use) { /* need full row offset */
if (!cusp->rowoffsets_gpu) {
cusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
cusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1);
PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt)));
}
*i = cusp->rowoffsets_gpu->data().get();
} else *i = csr->row_offsets->data().get();
}
if (j) *j = csr->column_indices->data().get();
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSERestoreIJ - restore the device row storage `i` and `j` indices obtained with `MatSeqAIJCUSPARSEGetIJ()`
Not Collective
Input Parameters:
+ A - the matrix
. compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating whether the matrix data structure should always be returned in compressed form
. i - the CSR row pointers
- j - the CSR column indices
Level: developer
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetIJ()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreIJ(Mat A, PetscBool compressed, const int **i, const int **j)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
if (i) *i = NULL;
if (j) *j = NULL;
(void)compressed;
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSEGetArrayRead - gives read-only access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored
Not Collective
Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Note:
May trigger host-device copies if up-to-date matrix data is on host
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar **a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *csr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix *)cusp->mat->mat;
PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
*a = csr->values->data().get();
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSERestoreArrayRead - restore the read-only access array obtained from `MatSeqAIJCUSPARSEGetArrayRead()`
Not Collective
Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data
Level: developer
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar **a)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
*a = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSEGetArray - gives read-write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored
Not Collective
Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Note:
May trigger host-device copies if up-to-date matrix data is on host
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArray()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar **a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *csr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix *)cusp->mat->mat;
PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSERestoreArray - restore the read-write access array obtained from `MatSeqAIJCUSPARSEGetArray()`
Not Collective
Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data
Level: developer
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCall(MatSeqAIJInvalidateDiagonal(A));
PetscCall(PetscObjectStateIncrease((PetscObject)A));
*a = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSEGetArrayWrite - gives write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored
Not Collective
Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix
Output Parameter:
. a - pointer to the device data
Level: developer
Note:
Does not trigger host-device copies and flags data validity on the GPU
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSERestoreArrayWrite()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar **a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
CsrMatrix *csr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
csr = (CsrMatrix *)cusp->mat->mat;
PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatSeqAIJCUSPARSERestoreArrayWrite - restore the write-only access array obtained from `MatSeqAIJCUSPARSEGetArrayWrite()`
Not Collective
Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data
Level: developer
.seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayWrite()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidPointer(a, 2);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCall(MatSeqAIJInvalidateDiagonal(A));
PetscCall(PetscObjectStateIncrease((PetscObject)A));
*a = NULL;
PetscFunctionReturn(PETSC_SUCCESS);
}
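/* Helpers for MatSeqAIJCUSPARSEMergeMats(): IJCompare4 orders (row, col, value, flag) tuples by
   row and then column, and Shift adds a fixed offset to an integer sequence (used to shift B's
   column indices by A's column count, and B's transposed row offsets by A's nonzero count). */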
struct IJCompare4 {
__host__ __device__ inline bool operator()(const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct Shift {
int _shift;
Shift(int shift) : _shift(shift) { }
__host__ __device__ inline int operator()(const int &c) { return c + _shift; }
};
/* merges two SeqAIJCUSPARSE matrices A and B side by side, so each row of C is the corresponding row of A followed by that of B; this is the [A';B']' operation in MATLAB notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A, Mat B, MatReuse reuse, Mat *C)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data, *b = (Mat_SeqAIJ *)B->data, *c;
Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr, *Ccusp;
Mat_SeqAIJCUSPARSEMultStruct *Cmat;
CsrMatrix *Acsr, *Bcsr, *Ccsr;
PetscInt Annz, Bnnz;
cusparseStatus_t stat;
PetscInt i, m, n, zero = 0;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
PetscValidHeaderSpecific(B, MAT_CLASSID, 2);
PetscValidPointer(C, 4);
PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
PetscCheckTypeName(B, MATSEQAIJCUSPARSE);
PetscCheck(A->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number of rows %" PetscInt_FMT " != %" PetscInt_FMT, A->rmap->n, B->rmap->n);
PetscCheck(reuse != MAT_INPLACE_MATRIX, PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_INPLACE_MATRIX not supported");
PetscCheck(Acusp->format != MAT_CUSPARSE_ELL && Acusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCheck(Bcusp->format != MAT_CUSPARSE_ELL && Bcusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
if (reuse == MAT_INITIAL_MATRIX) {
m = A->rmap->n;
n = A->cmap->n + B->cmap->n;
PetscCall(MatCreate(PETSC_COMM_SELF, C));
PetscCall(MatSetSizes(*C, m, n, m, n));
PetscCall(MatSetType(*C, MATSEQAIJCUSPARSE));
c = (Mat_SeqAIJ *)(*C)->data;
Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
Cmat->cprowIndices = NULL;
c->compressedrow.use = PETSC_FALSE;
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Ccusp->nrows = m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = m;
Ccsr->num_cols = n;
PetscCallCUSPARSE(cusparseCreateMatDescr(&Cmat->descr));
PetscCallCUSPARSE(cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
PetscCallCUDA(cudaMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar)));
PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar)));
PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar)));
PetscCallCUDA(cudaMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));
PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix *)Acusp->mat->mat;
Bcsr = (CsrMatrix *)Bcusp->mat->mat;
Annz = (PetscInt)Acsr->column_indices->size();
Bnnz = (PetscInt)Bcsr->column_indices->size();
c->nz = Annz + Bnnz;
Ccsr->row_offsets = new THRUSTINTARRAY32(m + 1);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
Ccsr->num_entries = c->nz;
Ccusp->cooPerm = new THRUSTINTARRAY(c->nz);
if (c->nz) {
auto Acoo = new THRUSTINTARRAY32(Annz);
auto Bcoo = new THRUSTINTARRAY32(Bnnz);
auto Ccoo = new THRUSTINTARRAY32(c->nz);
THRUSTINTARRAY32 *Aroff, *Broff;
if (a->compressedrow.use) { /* need full row offset */
if (!Acusp->rowoffsets_gpu) {
Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
Acusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1);
PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt)));
}
Aroff = Acusp->rowoffsets_gpu;
} else Aroff = Acsr->row_offsets;
if (b->compressedrow.use) { /* need full row offset */
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1);
PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt)));
}
Broff = Bcusp->rowoffsets_gpu;
} else Broff = Bcsr->row_offsets;
PetscCall(PetscLogGpuTimeBegin());
stat = cusparseXcsr2coo(Acusp->handle, Aroff->data().get(), Annz, m, Acoo->data().get(), CUSPARSE_INDEX_BASE_ZERO);
PetscCallCUSPARSE(stat);
stat = cusparseXcsr2coo(Bcusp->handle, Broff->data().get(), Bnnz, m, Bcoo->data().get(), CUSPARSE_INDEX_BASE_ZERO);
PetscCallCUSPARSE(stat);
/* Issues when using bool with large matrices on SUMMIT 10.2.89 */
auto Aperm = thrust::make_constant_iterator(1);
auto Bperm = thrust::make_constant_iterator(0);
#if PETSC_PKG_CUDA_VERSION_GE(10, 0, 0)
auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(), Shift(A->cmap->n));
auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(), Shift(A->cmap->n));
#else
/* there are issues instantiating the merge operation using a transform iterator for the columns of B */
auto Bcib = Bcsr->column_indices->begin();
auto Bcie = Bcsr->column_indices->end();
thrust::transform(Bcib, Bcie, Bcib, Shift(A->cmap->n));
#endif
auto wPerm = new THRUSTINTARRAY32(Annz + Bnnz);
auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(), Acsr->column_indices->begin(), Acsr->values->begin(), Aperm));
auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(), Acsr->column_indices->end(), Acsr->values->end(), Aperm));
auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(), Bcib, Bcsr->values->begin(), Bperm));
auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(), Bcie, Bcsr->values->end(), Bperm));
auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(), Ccsr->column_indices->begin(), Ccsr->values->begin(), wPerm->begin()));
auto p1 = Ccusp->cooPerm->begin();
auto p2 = Ccusp->cooPerm->begin();
thrust::advance(p2, Annz);
PetscCallThrust(thrust::merge(thrust::device, Azb, Aze, Bzb, Bze, Czb, IJCompare4()));
#if PETSC_PKG_CUDA_VERSION_LT(10, 0, 0)
thrust::transform(Bcib, Bcie, Bcib, Shift(-A->cmap->n));
#endif
auto cci = thrust::make_counting_iterator(zero);
auto cce = thrust::make_counting_iterator(c->nz);
#if 0 //Errors on SUMMIT cuda 11.1.0
PetscCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>()));
#else
auto pred = thrust::identity<int>();
PetscCallThrust(thrust::copy_if(thrust::device, cci, cce, wPerm->begin(), p1, pred));
PetscCallThrust(thrust::remove_copy_if(thrust::device, cci, cce, wPerm->begin(), p2, pred));
#endif
stat = cusparseXcoo2csr(Ccusp->handle, Ccoo->data().get(), c->nz, m, Ccsr->row_offsets->data().get(), CUSPARSE_INDEX_BASE_ZERO);
PetscCallCUSPARSE(stat);
PetscCall(PetscLogGpuTimeEnd());
delete wPerm;
delete Acoo;
delete Bcoo;
delete Ccoo;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
#endif
if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */
PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B));
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct;
CsrMatrix *CcsrT = new CsrMatrix;
CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix *)Bcusp->matTranspose->mat : NULL;
(*C)->form_explicit_transpose = PETSC_TRUE;
(*C)->transupdated = PETSC_TRUE;
Ccusp->rowoffsets_gpu = NULL;
CmatT->cprowIndices = NULL;
CmatT->mat = CcsrT;
CcsrT->num_rows = n;
CcsrT->num_cols = m;
CcsrT->num_entries = c->nz;
CcsrT->row_offsets = new THRUSTINTARRAY32(n + 1);
CcsrT->column_indices = new THRUSTINTARRAY32(c->nz);
CcsrT->values = new THRUSTARRAY(c->nz);
PetscCall(PetscLogGpuTimeBegin());
auto rT = CcsrT->row_offsets->begin();
if (AT) {
rT = thrust::copy(AcsrT->row_offsets->begin(), AcsrT->row_offsets->end(), rT);
thrust::advance(rT, -1);
}
if (BT) {
auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(), Shift(a->nz));
auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(), Shift(a->nz));
thrust::copy(titb, tite, rT);
}
auto cT = CcsrT->column_indices->begin();
if (AT) cT = thrust::copy(AcsrT->column_indices->begin(), AcsrT->column_indices->end(), cT);
if (BT) thrust::copy(BcsrT->column_indices->begin(), BcsrT->column_indices->end(), cT);
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT);
if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT);
PetscCall(PetscLogGpuTimeEnd());
PetscCallCUSPARSE(cusparseCreateMatDescr(&CmatT->descr));
PetscCallCUSPARSE(cusparseSetMatIndexBase(CmatT->descr, CUSPARSE_INDEX_BASE_ZERO));
PetscCallCUSPARSE(cusparseSetMatType(CmatT->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
PetscCallCUDA(cudaMalloc((void **)&(CmatT->alpha_one), sizeof(PetscScalar)));
PetscCallCUDA(cudaMalloc((void **)&(CmatT->beta_zero), sizeof(PetscScalar)));
PetscCallCUDA(cudaMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar)));
PetscCallCUDA(cudaMemcpy(CmatT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(CmatT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice));
PetscCallCUDA(cudaMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
stat = cusparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries, CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
PetscCallCUSPARSE(stat);
#endif
Ccusp->matTranspose = CmatT;
}
}
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
PetscCall(PetscMalloc1(m + 1, &c->i));
PetscCall(PetscMalloc1(c->nz, &c->j));
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
PetscCallCUDA(cudaMemcpy(c->i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost));
PetscCallCUDA(cudaMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost));
} else {
PetscCallCUDA(cudaMemcpy(c->i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost));
PetscCallCUDA(cudaMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost));
}
PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt)));
PetscCall(PetscMalloc1(m, &c->ilen));
PetscCall(PetscMalloc1(m, &c->imax));
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (i = 0; i < m; i++) {
const PetscInt nn = c->i[i + 1] - c->i[i];
c->ilen[i] = c->imax[i] = nn;
c->nonzerorowcnt += (PetscInt) !!nn;
c->rmax = PetscMax(c->rmax, nn);
}
PetscCall(MatMarkDiagonal_SeqAIJ(*C));
PetscCall(PetscMalloc1(c->nz, &c->a));
(*C)->nonzerostate++;
PetscCall(PetscLayoutSetUp((*C)->rmap));
PetscCall(PetscLayoutSetUp((*C)->cmap));
Ccusp->nonzerostate = (*C)->nonzerostate;
(*C)->preallocated = PETSC_TRUE;
} else {
PetscCheck((*C)->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number of rows %" PetscInt_FMT " != %" PetscInt_FMT, (*C)->rmap->n, B->rmap->n);
c = (Mat_SeqAIJ *)(*C)->data;
if (c->nz) {
Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr;
PetscCheck(Ccusp->cooPerm, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cooPerm");
PetscCheck(Ccusp->format != MAT_CUSPARSE_ELL && Ccusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
PetscCheck(Ccusp->nonzerostate == (*C)->nonzerostate, PETSC_COMM_SELF, PETSC_ERR_COR, "Wrong nonzerostate");
PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));
PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix *)Acusp->mat->mat;
Bcsr = (CsrMatrix *)Bcusp->mat->mat;
Ccsr = (CsrMatrix *)Ccusp->mat->mat;
PetscCheck(Acsr->num_entries == (PetscInt)Acsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "A nnz %" PetscInt_FMT " != %" PetscInt_FMT, Acsr->num_entries, (PetscInt)Acsr->values->size());
PetscCheck(Bcsr->num_entries == (PetscInt)Bcsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "B nnz %" PetscInt_FMT " != %" PetscInt_FMT, Bcsr->num_entries, (PetscInt)Bcsr->values->size());
PetscCheck(Ccsr->num_entries == (PetscInt)Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT, Ccsr->num_entries, (PetscInt)Ccsr->values->size());
PetscCheck(Ccsr->num_entries == Acsr->num_entries + Bcsr->num_entries, PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT " + %" PetscInt_FMT, Ccsr->num_entries, Acsr->num_entries, Bcsr->num_entries);
PetscCheck(Ccusp->cooPerm->size() == Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "permSize %" PetscInt_FMT " != %" PetscInt_FMT, (PetscInt)Ccusp->cooPerm->size(), (PetscInt)Ccsr->values->size());
auto pmid = Ccusp->cooPerm->begin();
thrust::advance(pmid, Acsr->num_entries);
PetscCall(PetscLogGpuTimeBegin());
auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->cooPerm->begin())));
auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid)));
thrust::for_each(zibait, zieait, VecCUDAEquals());
auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid)));
auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->cooPerm->end())));
thrust::for_each(zibbit, ziebit, VecCUDAEquals());
PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(*C, PETSC_FALSE));
if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) {
PetscCheck(Ccusp->matTranspose, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing transpose Mat_SeqAIJCUSPARSEMultStruct");
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix *)Bcusp->matTranspose->mat : NULL;
CsrMatrix *CcsrT = (CsrMatrix *)Ccusp->matTranspose->mat;
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT);
if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT);
(*C)->transupdated = PETSC_TRUE;
}
PetscCall(PetscLogGpuTimeEnd());
}
}
PetscCall(PetscObjectStateIncrease((PetscObject)*C));
(*C)->assembled = PETSC_TRUE;
(*C)->was_assembled = PETSC_FALSE;
(*C)->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(PETSC_SUCCESS);
}
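/* Copies selected entries of the CSR value array into v[]: when n > 0 and idx[] is given, the
   entries are gathered on the device through a permutation iterator; otherwise the first n values
   are copied directly. v[] may live on the host or on the device. */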
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
{
bool dmem;
const PetscScalar *av;
PetscFunctionBegin;
dmem = isCudaMem(v);
PetscCall(MatSeqAIJCUSPARSEGetArrayRead(A, &av));
if (n && idx) {
THRUSTINTARRAY widx(n);
widx.assign(idx, idx + n);
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
THRUSTARRAY *w = NULL;
thrust::device_ptr<PetscScalar> dv;
if (dmem) {
dv = thrust::device_pointer_cast(v);
} else {
w = new THRUSTARRAY(n);
dv = w->data();
}
thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av);
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.begin()), dv));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.end()), dv + n));
thrust::for_each(zibit, zieit, VecCUDAEquals());
if (w) PetscCallCUDA(cudaMemcpy(v, w->data().get(), n * sizeof(PetscScalar), cudaMemcpyDeviceToHost));
delete w;
} else {
PetscCallCUDA(cudaMemcpy(v, av, n * sizeof(PetscScalar), dmem ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost));
}
if (!dmem) PetscCall(PetscLogCpuToGpu(n * sizeof(PetscScalar)));
PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(A, &av));
PetscFunctionReturn(PETSC_SUCCESS);
}
|
8655cc73dceaa75d562de6d251dd9b977b86b9fe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
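// Element-wise (Hadamard) product: each thread derives a flattened index from its 2D coordinates
// and writes C[idx] = A[idx] * B[idx]; the guard keeps the index within the source length and
// keeps the target index below `rows`.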
__global__ void ElementWiseMultiply_CUDA(double *C, double *A, double *B, int rows, int cols)
{
int j = blockDim.x * blockIdx.x + threadIdx.x;
int i = blockDim.y * blockIdx.y + threadIdx.y;
int sourceLength = cols * rows;
int sourceIndex = i + (j * blockDim.y);
int targetIndex = i + (j * blockDim.y);
if ((sourceIndex <= sourceLength - 1) & (targetIndex < rows))
{
//if (i == 0 & j == 0)
//{
// printf("ElementWiseMultiply_CUDA, matrix A:\r\n");
// printMatrix_CUDA << <1, 1 >> > (A, dimA);
// printf("ElementWiseMultiply_CUDA, matrix B:\r\n");
// printMatrix_CUDA << <1, 1 >> > (B, dimB);
//}
//int idx = i + (j * dimC.y);
double a = A[sourceIndex];
double b = B[sourceIndex];
C[targetIndex] = a * b;
//printf("i=%i, j=%i idx=%i | %i = %i * %i\r\n", i, j, idx, C[idx], a, b);
}
}
|
8655cc73dceaa75d562de6d251dd9b977b86b9fe.cu
|
#include "includes.h"
__global__ void ElementWiseMultiply_CUDA(double *C, double *A, double *B, int rows, int cols)
{
int j = blockDim.x * blockIdx.x + threadIdx.x;
int i = blockDim.y * blockIdx.y + threadIdx.y;
int sourceLength = cols * rows;
int sourceIndex = i + (j * blockDim.y);
int targetIndex = i + (j * blockDim.y);
if ((sourceIndex <= sourceLength - 1) & (targetIndex < rows))
{
//if (i == 0 & j == 0)
//{
// printf("ElementWiseMultiply_CUDA, matrix A:\r\n");
// printMatrix_CUDA << <1, 1 >> > (A, dimA);
// printf("ElementWiseMultiply_CUDA, matrix B:\r\n");
// printMatrix_CUDA << <1, 1 >> > (B, dimB);
//}
//int idx = i + (j * dimC.y);
double a = A[sourceIndex];
double b = B[sourceIndex];
C[targetIndex] = a * b;
//printf("i=%i, j=%i idx=%i | %i = %i * %i\r\n", i, j, idx, C[idx], a, b);
}
}
|
a68fc89ecfaa28ec9ee02ca0dc3a3e89a18e1892.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//cuMatrix/cuKernel.h
// Copyright 2015-2-2 (Author: xutao)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
# include <math.h>
# include "cuKernel-ansi.h"
# include <hip/hip_runtime.h>
# include <rocblas.h>
# include <hip/hip_runtime.h>
# include <hiprand/hiprand.h>
# include <stdio.h>
#define Num_Blocks 16
#define CUDA1DBLOCK 256
/**************vector kernel function**************/
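/* These kernels treat x and y as dense length-`dim` vectors and use one thread per output
   element, except for the reduction kernels (_max_vec, _maxindex_vec, _sum_vec), which are
   launched on a single thread. */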
__global__
static void _add_vec(float* x, float* y, int dim, float alpha){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < dim)
x[i] += alpha * y[i];
}
__global__
static void _sumRowMat_vec(float* x, float* y, int dim, int col, size_t stride){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < dim){
x[i] = 0.0 ;
for(int m = 0; m < col; m++)
x[i] += y[i*stride + m];
}
}
__global__
static void _absumRowMat_vec(float* x, float* y, int dim, int col, size_t stride){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < dim){
x[i] = 0.0 ;
for(int m = 0; m < col; m++){
if (y[i * stride + m] > 0.0)
x[i] += y[i*stride + m];
else
x[i] -= y[i*stride + m];
}
}
}
__global__
static void _sumColMat_vec(float* x, float* y, int dim, int row, size_t stride){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < dim){
x[i] = 0.0 ;
for(int m = 0; m < row; m++)
x[i] += y[m * stride + i];
}
}
__global__
static void _max_vec(float* x, float* y, int dim){
float tmp = 0.0;
for(int j = 0; j < dim; j++)
if(tmp < x[j])
tmp = x[j];
y[0] = tmp;
}
__global__
static void _maxindex_vec(float* x, float* y, int dim){
float tmp = 0.0;
int maxidx;
for(int j = 0; j < dim; j++)
if(tmp < x[j]){
tmp = x[j];
maxidx = j;
}
y[0] = (float)maxidx;
}
__global__
static void _sum_vec(float* x, float* y, int dim){
y[0] = 0.0 ;
for(int i = 0; i < dim; i++)
y[0] += x[i] ;
}
__global__
static void _exp_vec(float* x, int dim){
int i = blockDim.x * blockIdx.x + threadIdx.x ;
if(i < dim)
x[i] = exp(x[i]) ;
}
__global__
static void _set_vec(float* x, int dim, float value){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < dim)
x[i] = value;
}
__global__
static void _scale_vec(float* x, int dim, float value){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < dim)
x[i] *= value ;
}
/******************cumatrix kernel*************/
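/* Most of these 2D kernels map thread coordinate i to the column and j to the row of a
   row-major matrix with leading dimension `stride`, so element (row j, column i) is
   x[j*stride + i]. */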
__global__
static void _FindRowMaxId(float* x, float* y, float* index, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < cols) && (j < rows)){
if(y[i] < x[i + j * stride]){
y[i] = x[i + j * stride] ;
index[j] = i ;
}
}
}
__global__
static void _ApplySoftMaxPerRow(float* x, float* y, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if(i < cols && j < rows)
x[j * stride + i] = x[j * stride + i]/y[j];
}
__global__
static void _sigmoids(float* x, float* y, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j*stride + i] = 1/(1+exp(-y[j*stride + i]));
}
}
__global__
static void _diffsigmoids(float* x, float* y, float* z, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j*stride + i] = y[j*stride + i] * (1 - y[j*stride + i]) * z[j*stride + i];
}
}
__global__
static void _relus(float* x, float* y, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j*stride + i] = (y[j*stride+i]>0)? y[j*stride+i] : 0;
}
}
__global__
static void _diffrelus(float* x, float* y, float* z , int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < cols)&&(j < rows)){
x[j * stride + i] = ((y[ j * stride + i]>0)? 1 : 0) * z[j * stride + i];
}
}
__global__
static void _tanhs(float* x, float* y, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j*stride + i] = (exp(y[j * stride + i]) - exp(-y[j * stride + i]))/(exp(y[j * stride + i]) + exp(-y[j * stride + i]));
}
}
__global__
static void _difftanhs(float* x, float* y, float* z, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j * stride + i] = (1 - y[ j * stride + i] * y[j * stride + i] ) * z[j * stride + i];
}
}
__global__
void _Set(float* x, int cols, int rows, float value, size_t stride){
int j = blockIdx.y * blockDim.y + threadIdx.y;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if((i < cols)&&(j < rows)){
x[j * stride + i] = value;
}
}
__global__
static void _Log(float* x, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < cols)&&(j < rows)){
x[ j * stride + i] = log(x[j * stride + i]);
}
}
__global__
static void _Exp(float* x, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < cols)&&(j < rows)){
x[j*stride + i] = exp(x[j*stride + i]);
}
}
__global__
static void _scale(float* x, int cols, int rows, size_t stride, float value){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < cols)&&(j < rows)){
x[j*stride + i] = x[j*stride + i] * value;
}
}
__global__
static void _ApplyFloor(float* x, int cols, int rows, float value, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j * stride + i] = (x[j * stride + i] >= value) ? x[j * stride + i] : 0;
}
}
__global__
static void _ApplyNorm(float* x, int cols, int rows, size_t stride){
int i = blockDim.x * blockIdx.x + threadIdx.x;
float max = 0.0;
for(int m = 0; m < cols; m++){
if(max < x[i * stride + m]) max = x[i * stride + m] ;
}
__syncthreads();
for(int n = 0; n < cols; n++){
x[i * stride + n] = x[ i * stride + n] / max ;
}
}
__global__
static void _ApplyHeaviside(float* x, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols) && (j < rows)){
x[j*stride + i] = (x[j* stride + i] > 0)? 1 : 0;
}
}
__global__
static void _AddVecToRows(float* x, float * y, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols) && (j < rows))
x[j*stride + i] = x[j*stride + i] + y[i];
}
__global__
static void _MulElements(float* x, float* y, int cols, int rows, size_t stride ){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols) && (j < rows)){
x[j*stride + i] = x[j*stride + i] * y[j*stride + i];
}
}
__global__
static void _addmat(float* x, float* y, int cols, int rows, size_t stride, float alpha){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols) && (j < rows)){
x[j * stride + i] += alpha * y[j * stride + i];
}
}
__global__
static void _BinarizeProbs(float* x, int cols, int rows, float* random){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < rows) && (j < cols)){
x[i*cols + j] = random[i*cols + j]>0 ? 1.0 : 0.0 ;
}
}
/********************************common*************************************/
void cudaF_destory(float* x){
hipFree(x);
}
/*********************************************************************************************************************/
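/* Launcher: seeds y[] with 0.1 on the device, runs _FindRowMaxId over a 2D grid covering
   cols x rows, then overwrites y[] with the recorded index buffer before freeing it. */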
void cudaF_FindRowMaxId(float* x, float* y, int cols, int rows, size_t stride) {
float* index;
float* show = new float[rows];
for(int i = 0; i < rows ; i++)
show[i] = 0.1 ;
hipMemcpy(y, show, sizeof(float)*rows, hipMemcpyHostToDevice);
hipMalloc((void**)&index, sizeof(float)*rows) ;
dim3 dimBlock(Num_Blocks, Num_Blocks) ;
dim3 dimGrid((cols + Num_Blocks -1)/Num_Blocks, (rows + Num_Blocks -1)/Num_Blocks) ;
hipLaunchKernelGGL(( _FindRowMaxId), dim3(dimGrid), dim3(dimBlock), 0, 0, x, y, index, cols, rows, stride) ;
hipMemcpy(y, index, sizeof(float)*rows, hipMemcpyDeviceToDevice);
delete [] show ;
hipFree(index);
}
void cudaF_ApplySoftMaxPerRow(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _ApplySoftMaxPerRow), dim3(dimGrid), dim3(dimBlock), 0, 0, x, y, cols, rows, stride);
}
void cudaF_sigmoids(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _sigmoids), dim3(dimGrid), dim3(dimBlock), 0, 0, x , y, cols, rows, stride);
}
void cudaF_diffsigmoids(float* x, float* y, float* z, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _diffsigmoids), dim3(dimGrid), dim3(dimBlock), 0, 0, x , y, z, cols, rows, stride);
}
void cudaF_tanhs(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _tanhs), dim3(dimGrid), dim3(dimBlock), 0, 0, x , y, cols, rows, stride);
}
void cudaF_difftanhs(float* x, float* y, float* z, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _difftanhs), dim3(dimGrid), dim3(dimBlock), 0, 0, x , y, z, cols, rows, stride);
}
void cudaF_relus(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _relus), dim3(dimGrid), dim3(dimBlock), 0, 0, x , y, cols, rows, stride);
}
void cudaF_diffrelus(float* x, float* y, float* z , int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x -1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _diffrelus), dim3(dimGrid), dim3(dimBlock), 0, 0, x , y, z , cols, rows, stride);
}
void cudaF_Sets(float* x, int cols, int rows, float value, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _Set), dim3(dimGrid), dim3(dimBlock), 0, 0, x , cols, rows, value, stride);
}
void cudaF_Log(float* x, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _Log), dim3(dimGrid),dim3(dimBlock), 0, 0, x, cols, rows, stride);
}
void cudaF_Exp(float* x, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _Exp), dim3(dimGrid),dim3(dimBlock), 0, 0, x, cols, rows, stride);
}
void cudaF_scale(float* x, int cols, int rows, size_t stride, float value){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _scale), dim3(dimGrid),dim3(dimBlock), 0, 0, x, cols, rows, stride, value);
}
void cudaF_ApplyFloor(float* x, int cols, int rows, float value, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _ApplyFloor), dim3(dimGrid), dim3(dimBlock), 0, 0, x , cols, rows, value, stride);
}
void cudaF_ApplyNorm(float* x, int cols, int rows, size_t stride){
dim3 dimGrid(Num_Blocks);
dim3 dimBlock((rows + Num_Blocks -1) / Num_Blocks);
hipLaunchKernelGGL(( _ApplyNorm), dim3(dimGrid), dim3(dimBlock), 0, 0, x, cols, rows, stride);
}
void cudaF_ApplyHeaviside(float* x, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _ApplyHeaviside), dim3(dimGrid), dim3(dimBlock), 0, 0, x, cols, rows, stride);
}
void cudaF_AddVecToRows(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks) ;
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y) ;
hipLaunchKernelGGL(( _AddVecToRows), dim3(dimGrid), dim3(dimBlock), 0, 0, x , y, cols , rows, stride) ;
}
void cudaF_MulElements(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _MulElements), dim3(dimGrid), dim3(dimBlock), 0, 0, x, y, cols, rows, stride);
}
void cudaF_addmat(float* x, float* y, int cols, int rows, size_t stride, float alpha ){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
hipLaunchKernelGGL(( _addmat), dim3(dimGrid), dim3(dimBlock), 0, 0, x, y, cols, rows, stride, alpha );
}
void cudaF_BinarizeProbs(float* x, int cols, int rows, float Probs, float* random){
int blocksPerGrid = cols/Num_Blocks;
if(cols % Num_Blocks) blocksPerGrid++;
dim3 dimBlock(blocksPerGrid, blocksPerGrid);
dim3 dimGrid(Num_Blocks, Num_Blocks);
hipLaunchKernelGGL(( _BinarizeProbs), dim3(dimGrid), dim3(dimBlock), 0, 0, x, cols, rows, random);
}
/*****************cuvector function**************/
// Note: callers must take care of the vector dimension here!
void cudaF_add_vec(float* x, float* y, int dim, float alpha){
dim3 dimGrid(Num_Blocks);
dim3 dimBlock(ceil(dim/Num_Blocks)+1);
hipLaunchKernelGGL(( _add_vec), dim3(dimGrid), dim3(dimBlock), 0, 0, x, y, dim, alpha);
}
void cudaF_sumRowMat_vec(float* x, float* y, int dim, int col, size_t stride){
dim3 dimGrid(Num_Blocks);
dim3 dimBlock((dim + Num_Blocks -1) / Num_Blocks);
hipLaunchKernelGGL(( _sumRowMat_vec), dim3(dimGrid), dim3(dimBlock), 0, 0, x, y, dim, col, stride);
}
void cudaF_AbsumRowMat_vec(float* x, float* y, int dim, int col, size_t stride){
dim3 dimGrid(Num_Blocks);
dim3 dimBlock((dim + Num_Blocks -1) / Num_Blocks);
hipLaunchKernelGGL(( _absumRowMat_vec), dim3(dimGrid), dim3(dimBlock), 0, 0, x, y, dim, col, stride);
}
void cudaF_sumColMat_vec(float* x, float* y, int dim, int row, size_t stride){
dim3 dimGrid((dim+CUDA1DBLOCK-1)/CUDA1DBLOCK);
dim3 dimBlock(CUDA1DBLOCK);
hipLaunchKernelGGL(( _sumColMat_vec), dim3(dimGrid), dim3(dimBlock), 0, 0, x, y, dim, row, stride);
}
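// Note: the three reductions below (_max_vec, _maxindex_vec, _sum_vec) are launched with a
// single thread (dim3(1), dim3(1)), so they run serially on the GPU; correctness over speed.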
void cudaF_max_vec(float* x, float* y, int dim){
hipLaunchKernelGGL(( _max_vec), dim3(1),dim3(1), 0, 0, x, y, dim);
}
void cudaF_maxindex_vec(float* x, float* y, int dim){
hipLaunchKernelGGL(( _maxindex_vec), dim3(1),dim3(1), 0, 0, x, y, dim);
}
void cudaF_sum_vec(float* x, float*y, int dim){
hipLaunchKernelGGL(( _sum_vec), dim3(1),dim3(1), 0, 0, x, y, dim);
}
void cudaF_exp_vec(float* x, int dim){
dim3 dimGrid((dim + Num_Blocks*Num_Blocks - 1) / (Num_Blocks*Num_Blocks));
dim3 dimBlock(Num_Blocks*Num_Blocks);
hipLaunchKernelGGL(( _exp_vec), dim3(dimGrid), dim3(dimBlock), 0, 0, x , dim);
}
void cudaF_set_vec(float* x, int dim, float value){
dim3 dimGrid((dim + Num_Blocks*Num_Blocks - 1) / (Num_Blocks*Num_Blocks));
dim3 dimBlock(Num_Blocks*Num_Blocks);
hipLaunchKernelGGL(( _set_vec), dim3(dimGrid), dim3(dimBlock), 0, 0, x, dim, value);
}
void cudaF_scale_vec(float* x, int dim, float value){
dim3 dimGrid((dim + Num_Blocks*Num_Blocks - 1) / (Num_Blocks*Num_Blocks));
dim3 dimBlock(Num_Blocks*Num_Blocks);
hipLaunchKernelGGL(( _scale_vec), dim3(dimGrid), dim3(dimBlock), 0, 0, x,dim,value);
}
|
a68fc89ecfaa28ec9ee02ca0dc3a3e89a18e1892.cu
|
//cuMatrix/cuKernel.h
// Copyright 2015-2-2 (Author: xutao)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
# include <math.h>
# include "cuKernel-ansi.h"
# include <cuda_runtime.h>
# include <cublas.h>
# include <cuda.h>
# include <curand.h>
# include <stdio.h>
#define Num_Blocks 16
#define CUDA1DBLOCK 256
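// Num_Blocks (16) is used as the edge length of the 16x16 2D thread blocks for the matrix
// kernels; CUDA1DBLOCK (256) is the 1D block size used by the column-sum reduction.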
/**************vector kernel function**************/
__global__
static void _add_vec(float* x, float* y, int dim, float alpha){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < dim)
x[i] += alpha * y[i];
}
__global__
static void _sumRowMat_vec(float* x, float* y, int dim, int col, size_t stride){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < dim){
x[i] = 0.0 ;
for(int m = 0; m < col; m++)
x[i] += y[i*stride + m];
}
}
__global__
static void _absumRowMat_vec(float* x, float* y, int dim, int col, size_t stride){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < dim){
x[i] = 0.0 ;
for(int m = 0; m < col; m++){
if (y[i * stride + m] > 0.0)
x[i] += y[i*stride + m];
else
x[i] -= y[i*stride + m];
}
}
}
__global__
static void _sumColMat_vec(float* x, float* y, int dim, int row, size_t stride){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < dim){
x[i] = 0.0 ;
for(int m = 0; m < row; m++)
x[i] += y[m * stride + i];
}
}
__global__
static void _max_vec(float* x, float* y, int dim){
float tmp = 0.0;
for(int j = 0; j < dim; j++)
if(tmp < x[j])
tmp = x[j];
y[0] = tmp;
}
__global__
static void _maxindex_vec(float* x, float* y, int dim){
float tmp = 0.0;
int maxidx = 0;
for(int j = 0; j < dim; j++)
if(tmp < x[j]){
tmp = x[j];
maxidx = j;
}
y[0] = (float)maxidx;
}
__global__
static void _sum_vec(float* x, float* y, int dim){
y[0] = 0.0 ;
for(int i = 0; i < dim; i++)
y[0] += x[i] ;
}
__global__
static void _exp_vec(float* x, int dim){
int i = blockDim.x * blockIdx.x + threadIdx.x ;
if(i < dim)
x[i] = exp(x[i]) ;
}
__global__
static void _set_vec(float* x, int dim, float value){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < dim)
x[i] = value;
}
__global__
static void _scale_vec(float* x, int dim, float value){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < dim)
x[i] *= value ;
}
/******************cumatrix kernel*************/
__global__
static void _FindRowMaxId(float* x, float* y, float* index, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < cols) && (j < rows)){
if(y[i] < x[i + j * stride]){
y[i] = x[i + j * stride] ;
index[j] = i ;
}
}
}
__global__
static void _ApplySoftMaxPerRow(float* x, float* y, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if(i < cols && j < rows)
x[j * stride + i] = x[j * stride + i]/y[j];
}
__global__
static void _sigmoids(float* x, float* y, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j*stride + i] = 1/(1+exp(-y[j*stride + i]));
}
}
__global__
static void _diffsigmoids(float* x, float* y, float* z, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j*stride + i] = y[j*stride + i] * (1 - y[j*stride + i]) * z[j*stride + i];
}
}
__global__
static void _relus(float* x, float* y, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j*stride + i] = (y[j*stride+i]>0)? y[j*stride+i] : 0;
}
}
__global__
static void _diffrelus(float* x, float* y, float* z , int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < cols)&&(j < rows)){
x[j * stride + i] = ((y[ j * stride + i]>0)? 1 : 0) * z[j * stride + i];
}
}
__global__
static void _tanhs(float* x, float* y, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j*stride + i] = (exp(y[j * stride + i]) - exp(-y[j * stride + i]))/(exp(y[j * stride + i]) + exp(-y[j * stride + i]));
}
}
__global__
static void _difftanhs(float* x, float* y, float* z, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j * stride + i] = (1 - y[ j * stride + i] * y[j * stride + i] ) * z[j * stride + i];
}
}
__global__
void _Set(float* x, int cols, int rows, float value, size_t stride){
int j = blockIdx.y * blockDim.y + threadIdx.y;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if((i < cols)&&(j < rows)){
x[j * stride + i] = value;
}
}
__global__
static void _Log(float* x, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < cols)&&(j < rows)){
x[ j * stride + i] = log(x[j * stride + i]);
}
}
__global__
static void _Exp(float* x, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < cols)&&(j < rows)){
x[j*stride + i] = exp(x[j*stride + i]);
}
}
__global__
static void _scale(float* x, int cols, int rows, size_t stride, float value){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < cols)&&(j < rows)){
x[j*stride + i] = x[j*stride + i] * value;
}
}
__global__
static void _ApplyFloor(float* x, int cols, int rows, float value, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols)&&(j < rows)){
x[j * stride + i] = (x[j * stride + i] >= value) ? x[j * stride + i] : 0;
}
}
__global__
static void _ApplyNorm(float* x, int cols, int rows, size_t stride){
int i = blockDim.x * blockIdx.x + threadIdx.x;
float max = 0.0;
for(int m = 0; m < cols; m++){
if(max < x[i * stride + m]) max = x[i * stride + m] ;
}
__syncthreads();
for(int n = 0; n < cols; n++){
x[i * stride + n] = x[ i * stride + n] / max ;
}
}
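// Note: _ApplyNorm has no "i < rows" guard, so when rows is not a multiple of Num_Blocks the
// extra threads of the launch index past the end of x.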
__global__
static void _ApplyHeaviside(float* x, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols) && (j < rows)){
x[j*stride + i] = (x[j* stride + i] > 0)? 1 : 0;
}
}
__global__
static void _AddVecToRows(float* x, float * y, int cols, int rows, size_t stride){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols) && (j < rows))
x[j*stride + i] = x[j*stride + i] + y[i];
}
__global__
static void _MulElements(float* x, float* y, int cols, int rows, size_t stride ){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols) && (j < rows)){
x[j*stride + i] = x[j*stride + i] * y[j*stride + i];
}
}
__global__
static void _addmat(float* x, float* y, int cols, int rows, size_t stride, float alpha){
int j = blockIdx.y*blockDim.y + threadIdx.y ;
int i = blockIdx.x*blockDim.x + threadIdx.x ;
if((i < cols) && (j < rows)){
x[j * stride + i] += alpha * y[j * stride + i];
}
}
__global__
static void _BinarizeProbs(float* x, int cols, int rows, float* random){
int j = blockIdx.y*blockDim.y + threadIdx.y;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if((i < rows) && (j < cols)){
x[i*cols + j] = random[i*cols + j]>0 ? 1.0 : 0.0 ;
}
}
/********************************common*************************************/
void cudaF_destory(float* x){
cudaFree(x);
}
/*********************************************************************************************************************/
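// cudaF_FindRowMaxId: y is first seeded with 0.1 on the device as the running maxima, the
// kernel records the index of each winning element into a temporary buffer, and that buffer
// is then copied into y (device-to-device), so y finally holds indices rather than values.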
void cudaF_FindRowMaxId(float* x, float* y, int cols, int rows, size_t stride) {
float* index;
float* show = new float[rows];
for(int i = 0; i < rows ; i++)
show[i] = 0.1 ;
cudaMemcpy(y, show, sizeof(float)*rows, cudaMemcpyHostToDevice);
cudaMalloc((void**)&index, sizeof(float)*rows) ;
dim3 dimBlock(Num_Blocks, Num_Blocks) ;
dim3 dimGrid((cols + Num_Blocks -1)/Num_Blocks, (rows + Num_Blocks -1)/Num_Blocks) ;
_FindRowMaxId<<<dimGrid, dimBlock>>>(x, y, index, cols, rows, stride) ;
cudaMemcpy(y, index, sizeof(float)*rows, cudaMemcpyDeviceToDevice);
delete [] show ;
cudaFree(index);
}
void cudaF_ApplySoftMaxPerRow(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_ApplySoftMaxPerRow<<<dimGrid, dimBlock>>>(x, y, cols, rows, stride);
}
void cudaF_sigmoids(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_sigmoids<<<dimGrid, dimBlock>>> (x , y, cols, rows, stride);
}
void cudaF_diffsigmoids(float* x, float* y, float* z, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_diffsigmoids<<<dimGrid, dimBlock>>> (x , y, z, cols, rows, stride);
}
void cudaF_tanhs(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_tanhs<<<dimGrid, dimBlock>>> (x , y, cols, rows, stride);
}
void cudaF_difftanhs(float* x, float* y, float* z, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_difftanhs<<<dimGrid, dimBlock>>> (x , y, z, cols, rows, stride);
}
void cudaF_relus(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_relus<<<dimGrid, dimBlock>>> (x , y, cols, rows, stride);
}
void cudaF_diffrelus(float* x, float* y, float* z , int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x -1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_diffrelus<<<dimGrid, dimBlock>>> (x , y, z , cols, rows, stride);
}
void cudaF_Sets(float* x, int cols, int rows, float value, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_Set<<<dimGrid, dimBlock>>> (x , cols, rows, value, stride);
}
void cudaF_Log(float* x, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_Log<<<dimGrid,dimBlock>>>(x, cols, rows, stride);
}
void cudaF_Exp(float* x, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_Exp<<<dimGrid,dimBlock>>>(x, cols, rows, stride);
}
void cudaF_scale(float* x, int cols, int rows, size_t stride, float value){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_scale<<<dimGrid,dimBlock>>>(x, cols, rows, stride, value);
}
void cudaF_ApplyFloor(float* x, int cols, int rows, float value, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_ApplyFloor<<<dimGrid, dimBlock>>> (x , cols, rows, value, stride);
}
void cudaF_ApplyNorm(float* x, int cols, int rows, size_t stride){
dim3 dimGrid(Num_Blocks);
dim3 dimBlock((rows + Num_Blocks -1) / Num_Blocks);
_ApplyNorm<<<dimGrid, dimBlock>>> (x, cols, rows, stride);
}
void cudaF_ApplyHeaviside(float* x, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_ApplyHeaviside<<<dimGrid, dimBlock>>>(x, cols, rows, stride);
}
void cudaF_AddVecToRows(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks) ;
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y) ;
_AddVecToRows<<<dimGrid, dimBlock>>>(x , y, cols , rows, stride) ;
}
void cudaF_MulElements(float* x, float* y, int cols, int rows, size_t stride){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_MulElements<<<dimGrid, dimBlock>>> (x, y, cols, rows, stride);
}
void cudaF_addmat(float* x, float* y, int cols, int rows, size_t stride, float alpha ){
dim3 dimBlock(Num_Blocks, Num_Blocks);
dim3 dimGrid((cols + dimBlock.x - 1)/dimBlock.x , (rows + dimBlock.y - 1)/dimBlock.y);
_addmat<<<dimGrid, dimBlock>>>(x, y, cols, rows, stride, alpha );
}
void cudaF_BinarizeProbs(float* x, int cols, int rows, float Probs, float* random){
int blocksPerGrid = cols/Num_Blocks;
if(cols % Num_Blocks) blocksPerGrid++;
dim3 dimBlock(blocksPerGrid, blocksPerGrid);
dim3 dimGrid(Num_Blocks, Num_Blocks);
_BinarizeProbs<<<dimGrid, dimBlock>>> (x, cols, rows, random);
}
/*****************cuvector function**************/
// Note: callers must take care of the vector dimension here!
void cudaF_add_vec(float* x, float* y, int dim, float alpha){
dim3 dimGrid(Num_Blocks);
dim3 dimBlock(ceil(dim/Num_Blocks)+1);
_add_vec<<<dimGrid, dimBlock>>>(x, y, dim, alpha);
}
void cudaF_sumRowMat_vec(float* x, float* y, int dim, int col, size_t stride){
dim3 dimGrid(Num_Blocks);
dim3 dimBlock((dim + Num_Blocks -1) / Num_Blocks);
_sumRowMat_vec<<<dimGrid, dimBlock>>>(x, y, dim, col, stride);
}
void cudaF_AbsumRowMat_vec(float* x, float* y, int dim, int col, size_t stride){
dim3 dimGrid(Num_Blocks);
dim3 dimBlock((dim + Num_Blocks -1) / Num_Blocks);
_absumRowMat_vec<<<dimGrid, dimBlock>>>(x, y, dim, col, stride);
}
void cudaF_sumColMat_vec(float* x, float* y, int dim, int row, size_t stride){
dim3 dimGrid((dim+CUDA1DBLOCK-1)/CUDA1DBLOCK);
dim3 dimBlock(CUDA1DBLOCK);
_sumColMat_vec<<<dimGrid, dimBlock>>>(x, y, dim, row, stride);
}
void cudaF_max_vec(float* x, float* y, int dim){
_max_vec<<<1,1>>>(x, y, dim);
}
void cudaF_maxindex_vec(float* x, float* y, int dim){
_maxindex_vec<<<1,1>>>(x, y, dim);
}
void cudaF_sum_vec(float* x, float*y, int dim){
_sum_vec<<<1,1>>>(x, y, dim);
}
void cudaF_exp_vec(float* x, int dim){
dim3 dimGrid((dim + Num_Blocks*Num_Blocks - 1) / (Num_Blocks*Num_Blocks));
dim3 dimBlock(Num_Blocks*Num_Blocks);
_exp_vec<<<dimGrid, dimBlock>>> (x , dim);
}
void cudaF_set_vec(float* x, int dim, float value){
dim3 dimGrid((dim + Num_Blocks*Num_Blocks - 1) / (Num_Blocks*Num_Blocks));
dim3 dimBlock(Num_Blocks*Num_Blocks);
_set_vec<<<dimGrid, dimBlock>>> (x, dim, value);
}
void cudaF_scale_vec(float* x, int dim, float value){
dim3 dimGrid((dim + Num_Blocks*Num_Blocks - 1) / (Num_Blocks*Num_Blocks));
dim3 dimBlock(Num_Blocks*Num_Blocks);
_scale_vec<<<dimGrid, dimBlock>>> (x,dim,value);
}
|
dc08a9bee07ad432d7025da9db436da36262079d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Particles.h"
#include "Alloc.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
/** allocate particle arrays */
void particle_allocate(struct parameters* param, struct particles* part, int is)
{
// set species ID
part->species_ID = is;
// number of particles
part->nop = param->np[is];
// maximum number of particles
part->npmax = param->npMax[is];
// choose a different number of mover iterations for ions and electrons
if (param->qom[is] < 0){ //electrons
part->NiterMover = param->NiterMover;
part->n_sub_cycles = param->n_sub_cycles;
} else { // ions: only one iteration
part->NiterMover = 1;
part->n_sub_cycles = 1;
}
// particles per cell
part->npcelx = param->npcelx[is];
part->npcely = param->npcely[is];
part->npcelz = param->npcelz[is];
part->npcel = part->npcelx*part->npcely*part->npcelz;
// cast it to required precision
part->qom = (FPpart) param->qom[is];
long npmax = part->npmax;
// initialize drift and thermal velocities
// drift
part->u0 = (FPpart) param->u0[is];
part->v0 = (FPpart) param->v0[is];
part->w0 = (FPpart) param->w0[is];
// thermal
part->uth = (FPpart) param->uth[is];
part->vth = (FPpart) param->vth[is];
part->wth = (FPpart) param->wth[is];
//////////////////////////////
/// ALLOCATION PARTICLE ARRAYS
//////////////////////////////
part->x = new FPpart[npmax];
part->y = new FPpart[npmax];
part->z = new FPpart[npmax];
// allocate velocity
part->u = new FPpart[npmax];
part->v = new FPpart[npmax];
part->w = new FPpart[npmax];
// allocate charge = q * statistical weight
part->q = new FPinterp[npmax];
}
/** deallocate */
void particle_deallocate(struct particles* part)
{
// deallocate particle variables
delete[] part->x;
delete[] part->y;
delete[] part->z;
delete[] part->u;
delete[] part->v;
delete[] part->w;
delete[] part->q;
}
/** particle mover */
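// mover_PC implements a predictor-corrector push: for every sub-cycle each particle gathers
// the E and B fields from its 8 surrounding grid nodes (G->P interpolation), iterates the
// implicit mid-point velocity NiterMover times, then applies periodic/reflecting boundaries.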
int mover_PC(struct particles* part, struct EMfield* field, struct grid* grd, struct parameters* param)
{
// print species and subcycling
std::cout << "*** MOVER with SUBCYCLYING "<< param->n_sub_cycles << " - species " << part->species_ID << " ***" << std::endl;
// auxiliary variables
FPpart dt_sub_cycling = (FPpart) param->dt/((double) part->n_sub_cycles);
FPpart dto2 = .5*dt_sub_cycling, qomdt2 = part->qom*dto2/param->c;
FPpart omdtsq, denom, ut, vt, wt, udotb;
// local (to the particle) electric and magnetic field
FPfield Exl=0.0, Eyl=0.0, Ezl=0.0, Bxl=0.0, Byl=0.0, Bzl=0.0;
// interpolation densities
int ix,iy,iz;
FPfield weight[2][2][2];
FPfield xi[2], eta[2], zeta[2];
// intermediate particle position and velocity
FPpart xptilde, yptilde, zptilde, uptilde, vptilde, wptilde;
// start subcycling
for (int i_sub=0; i_sub < part->n_sub_cycles; i_sub++){
// move each particle with new fields
for (int i=0; i < part->nop; i++){
xptilde = part->x[i];
yptilde = part->y[i];
zptilde = part->z[i];
// calculate the average velocity iteratively
for(int innter=0; innter < part->NiterMover; innter++){
// interpolation G-->P
ix = 2 + int((part->x[i] - grd->xStart)*grd->invdx);
iy = 2 + int((part->y[i] - grd->yStart)*grd->invdy);
iz = 2 + int((part->z[i] - grd->zStart)*grd->invdz);
// calculate weights
xi[0] = part->x[i] - grd->XN[ix - 1][iy][iz];
eta[0] = part->y[i] - grd->YN[ix][iy - 1][iz];
zeta[0] = part->z[i] - grd->ZN[ix][iy][iz - 1];
xi[1] = grd->XN[ix][iy][iz] - part->x[i];
eta[1] = grd->YN[ix][iy][iz] - part->y[i];
zeta[1] = grd->ZN[ix][iy][iz] - part->z[i];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
weight[i][j][k] = xi[i] * eta[j] * zeta[k] * grd->invVOL;
// set to zero local electric and magnetic field
Exl=0.0, Eyl = 0.0, Ezl = 0.0, Bxl = 0.0, Byl = 0.0, Bzl = 0.0;
for (int ii=0; ii < 2; ii++)
for (int jj=0; jj < 2; jj++)
for(int kk=0; kk < 2; kk++){
Exl += weight[ii][jj][kk]*field->Ex[ix- ii][iy -jj][iz- kk ];
Eyl += weight[ii][jj][kk]*field->Ey[ix- ii][iy -jj][iz- kk ];
Ezl += weight[ii][jj][kk]*field->Ez[ix- ii][iy -jj][iz -kk ];
Bxl += weight[ii][jj][kk]*field->Bxn[ix- ii][iy -jj][iz -kk ];
Byl += weight[ii][jj][kk]*field->Byn[ix- ii][iy -jj][iz -kk ];
Bzl += weight[ii][jj][kk]*field->Bzn[ix- ii][iy -jj][iz -kk ];
}
// end interpolation
omdtsq = qomdt2*qomdt2*(Bxl*Bxl+Byl*Byl+Bzl*Bzl);
denom = 1.0/(1.0 + omdtsq);
// solve the position equation
ut= part->u[i] + qomdt2*Exl;
vt= part->v[i] + qomdt2*Eyl;
wt= part->w[i] + qomdt2*Ezl;
udotb = ut*Bxl + vt*Byl + wt*Bzl;
// solve the velocity equation
uptilde = (ut+qomdt2*(vt*Bzl -wt*Byl + qomdt2*udotb*Bxl))*denom;
vptilde = (vt+qomdt2*(wt*Bxl -ut*Bzl + qomdt2*udotb*Byl))*denom;
wptilde = (wt+qomdt2*(ut*Byl -vt*Bxl + qomdt2*udotb*Bzl))*denom;
// update position
part->x[i] = xptilde + uptilde*dto2;
part->y[i] = yptilde + vptilde*dto2;
part->z[i] = zptilde + wptilde*dto2;
} // end of iteration
// update the final position and velocity
part->u[i]= 2.0*uptilde - part->u[i];
part->v[i]= 2.0*vptilde - part->v[i];
part->w[i]= 2.0*wptilde - part->w[i];
part->x[i] = xptilde + uptilde*dt_sub_cycling;
part->y[i] = yptilde + vptilde*dt_sub_cycling;
part->z[i] = zptilde + wptilde*dt_sub_cycling;
//////////
//////////
////////// BC
// X-DIRECTION: BC particles
if (part->x[i] > grd->Lx){
if (param->PERIODICX==true){ // PERIODIC
part->x[i] = part->x[i] - grd->Lx;
} else { // REFLECTING BC
part->u[i] = -part->u[i];
part->x[i] = 2*grd->Lx - part->x[i];
}
}
if (part->x[i] < 0){
if (param->PERIODICX==true){ // PERIODIC
part->x[i] = part->x[i] + grd->Lx;
} else { // REFLECTING BC
part->u[i] = -part->u[i];
part->x[i] = -part->x[i];
}
}
// Y-DIRECTION: BC particles
if (part->y[i] > grd->Ly){
if (param->PERIODICY==true){ // PERIODIC
part->y[i] = part->y[i] - grd->Ly;
} else { // REFLECTING BC
part->v[i] = -part->v[i];
part->y[i] = 2*grd->Ly - part->y[i];
}
}
if (part->y[i] < 0){
if (param->PERIODICY==true){ // PERIODIC
part->y[i] = part->y[i] + grd->Ly;
} else { // REFLECTING BC
part->v[i] = -part->v[i];
part->y[i] = -part->y[i];
}
}
// Z-DIRECTION: BC particles
if (part->z[i] > grd->Lz){
if (param->PERIODICZ==true){ // PERIODIC
part->z[i] = part->z[i] - grd->Lz;
} else { // REFLECTING BC
part->w[i] = -part->w[i];
part->z[i] = 2*grd->Lz - part->z[i];
}
}
if (part->z[i] < 0){
if (param->PERIODICZ==true){ // PERIODIC
part->z[i] = part->z[i] + grd->Lz;
} else { // REFLECTING BC
part->w[i] = -part->w[i];
part->z[i] = -part->z[i];
}
}
} // end of one particle
} // end of subcycling
return(0); // exit successfully
} // end of the mover
/** Interpolation Particle --> Grid: This is for species */
void interpP2G(struct particles* part, struct interpDensSpecies* ids, struct grid* grd)
{
// arrays needed for interpolation
FPpart weight[2][2][2];
FPpart temp[2][2][2];
FPpart xi[2], eta[2], zeta[2];
// index of the cell
int ix, iy, iz;
for (register long long i = 0; i < part->nop; i++) {
// determine cell: can we change to int()? is it faster?
ix = 2 + int (floor((part->x[i] - grd->xStart) * grd->invdx));
iy = 2 + int (floor((part->y[i] - grd->yStart) * grd->invdy));
iz = 2 + int (floor((part->z[i] - grd->zStart) * grd->invdz));
// distances from node
xi[0] = part->x[i] - grd->XN[ix - 1][iy][iz];
eta[0] = part->y[i] - grd->YN[ix][iy - 1][iz];
zeta[0] = part->z[i] - grd->ZN[ix][iy][iz - 1];
xi[1] = grd->XN[ix][iy][iz] - part->x[i];
eta[1] = grd->YN[ix][iy][iz] - part->y[i];
zeta[1] = grd->ZN[ix][iy][iz] - part->z[i];
// calculate the weights for different nodes
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
weight[ii][jj][kk] = part->q[i] * xi[ii] * eta[jj] * zeta[kk] * grd->invVOL;
//////////////////////////
// add charge density
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->rhon[ix - i][iy - j][iz - k] += weight[i][j][k] * grd->invVOL;
////////////////////////////
// add current density - Jx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->Jx[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
////////////////////////////
// add current density - Jy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->Jy[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
////////////////////////////
// add current density - Jz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->w[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->Jz[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
////////////////////////////
// add pressure pxx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->u[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->pxx[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
////////////////////////////
// add pressure pxy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->v[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->pxy[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
/////////////////////////////
// add pressure pxz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->w[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->pxz[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
/////////////////////////////
// add pressure pyy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * part->v[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->pyy[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
/////////////////////////////
// add pressure pyz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * part->w[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->pyz[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
/////////////////////////////
// add pressure pzz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->w[i] * part->w[i] * weight[ii][jj][kk];
for (int i=0; i < 2; i++)
for (int j=0; j < 2; j++)
for(int k=0; k < 2; k++)
ids->pzz[ix -i][iy -j][iz - k] += temp[i][j][k] * grd->invVOL;
}
}
|
dc08a9bee07ad432d7025da9db436da36262079d.cu
|
#include "Particles.h"
#include "Alloc.h"
#include <cuda.h>
#include <cuda_runtime.h>
/** allocate particle arrays */
void particle_allocate(struct parameters* param, struct particles* part, int is)
{
// set species ID
part->species_ID = is;
// number of particles
part->nop = param->np[is];
// maximum number of particles
part->npmax = param->npMax[is];
// choose a different number of mover iterations for ions and electrons
if (param->qom[is] < 0){ //electrons
part->NiterMover = param->NiterMover;
part->n_sub_cycles = param->n_sub_cycles;
} else { // ions: only one iteration
part->NiterMover = 1;
part->n_sub_cycles = 1;
}
// particles per cell
part->npcelx = param->npcelx[is];
part->npcely = param->npcely[is];
part->npcelz = param->npcelz[is];
part->npcel = part->npcelx*part->npcely*part->npcelz;
// cast it to required precision
part->qom = (FPpart) param->qom[is];
long npmax = part->npmax;
// initialize drift and thermal velocities
// drift
part->u0 = (FPpart) param->u0[is];
part->v0 = (FPpart) param->v0[is];
part->w0 = (FPpart) param->w0[is];
// thermal
part->uth = (FPpart) param->uth[is];
part->vth = (FPpart) param->vth[is];
part->wth = (FPpart) param->wth[is];
//////////////////////////////
/// ALLOCATION PARTICLE ARRAYS
//////////////////////////////
part->x = new FPpart[npmax];
part->y = new FPpart[npmax];
part->z = new FPpart[npmax];
// allocate velocity
part->u = new FPpart[npmax];
part->v = new FPpart[npmax];
part->w = new FPpart[npmax];
// allocate charge = q * statistical weight
part->q = new FPinterp[npmax];
}
/** deallocate */
void particle_deallocate(struct particles* part)
{
// deallocate particle variables
delete[] part->x;
delete[] part->y;
delete[] part->z;
delete[] part->u;
delete[] part->v;
delete[] part->w;
delete[] part->q;
}
/** particle mover */
int mover_PC(struct particles* part, struct EMfield* field, struct grid* grd, struct parameters* param)
{
// print species and subcycling
std::cout << "*** MOVER with SUBCYCLYING "<< param->n_sub_cycles << " - species " << part->species_ID << " ***" << std::endl;
// auxiliary variables
FPpart dt_sub_cycling = (FPpart) param->dt/((double) part->n_sub_cycles);
FPpart dto2 = .5*dt_sub_cycling, qomdt2 = part->qom*dto2/param->c;
FPpart omdtsq, denom, ut, vt, wt, udotb;
// local (to the particle) electric and magnetic field
FPfield Exl=0.0, Eyl=0.0, Ezl=0.0, Bxl=0.0, Byl=0.0, Bzl=0.0;
// interpolation densities
int ix,iy,iz;
FPfield weight[2][2][2];
FPfield xi[2], eta[2], zeta[2];
// intermediate particle position and velocity
FPpart xptilde, yptilde, zptilde, uptilde, vptilde, wptilde;
// start subcycling
for (int i_sub=0; i_sub < part->n_sub_cycles; i_sub++){
// move each particle with new fields
for (int i=0; i < part->nop; i++){
xptilde = part->x[i];
yptilde = part->y[i];
zptilde = part->z[i];
// calculate the average velocity iteratively
for(int innter=0; innter < part->NiterMover; innter++){
// interpolation G-->P
ix = 2 + int((part->x[i] - grd->xStart)*grd->invdx);
iy = 2 + int((part->y[i] - grd->yStart)*grd->invdy);
iz = 2 + int((part->z[i] - grd->zStart)*grd->invdz);
// calculate weights
xi[0] = part->x[i] - grd->XN[ix - 1][iy][iz];
eta[0] = part->y[i] - grd->YN[ix][iy - 1][iz];
zeta[0] = part->z[i] - grd->ZN[ix][iy][iz - 1];
xi[1] = grd->XN[ix][iy][iz] - part->x[i];
eta[1] = grd->YN[ix][iy][iz] - part->y[i];
zeta[1] = grd->ZN[ix][iy][iz] - part->z[i];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
weight[i][j][k] = xi[i] * eta[j] * zeta[k] * grd->invVOL;
// set to zero local electric and magnetic field
Exl=0.0, Eyl = 0.0, Ezl = 0.0, Bxl = 0.0, Byl = 0.0, Bzl = 0.0;
for (int ii=0; ii < 2; ii++)
for (int jj=0; jj < 2; jj++)
for(int kk=0; kk < 2; kk++){
Exl += weight[ii][jj][kk]*field->Ex[ix- ii][iy -jj][iz- kk ];
Eyl += weight[ii][jj][kk]*field->Ey[ix- ii][iy -jj][iz- kk ];
Ezl += weight[ii][jj][kk]*field->Ez[ix- ii][iy -jj][iz -kk ];
Bxl += weight[ii][jj][kk]*field->Bxn[ix- ii][iy -jj][iz -kk ];
Byl += weight[ii][jj][kk]*field->Byn[ix- ii][iy -jj][iz -kk ];
Bzl += weight[ii][jj][kk]*field->Bzn[ix- ii][iy -jj][iz -kk ];
}
// end interpolation
omdtsq = qomdt2*qomdt2*(Bxl*Bxl+Byl*Byl+Bzl*Bzl);
denom = 1.0/(1.0 + omdtsq);
// solve the position equation
ut= part->u[i] + qomdt2*Exl;
vt= part->v[i] + qomdt2*Eyl;
wt= part->w[i] + qomdt2*Ezl;
udotb = ut*Bxl + vt*Byl + wt*Bzl;
// solve the velocity equation
uptilde = (ut+qomdt2*(vt*Bzl -wt*Byl + qomdt2*udotb*Bxl))*denom;
vptilde = (vt+qomdt2*(wt*Bxl -ut*Bzl + qomdt2*udotb*Byl))*denom;
wptilde = (wt+qomdt2*(ut*Byl -vt*Bxl + qomdt2*udotb*Bzl))*denom;
// update position
part->x[i] = xptilde + uptilde*dto2;
part->y[i] = yptilde + vptilde*dto2;
part->z[i] = zptilde + wptilde*dto2;
} // end of iteration
// update the final position and velocity
part->u[i]= 2.0*uptilde - part->u[i];
part->v[i]= 2.0*vptilde - part->v[i];
part->w[i]= 2.0*wptilde - part->w[i];
part->x[i] = xptilde + uptilde*dt_sub_cycling;
part->y[i] = yptilde + vptilde*dt_sub_cycling;
part->z[i] = zptilde + wptilde*dt_sub_cycling;
//////////
//////////
////////// BC
// X-DIRECTION: BC particles
if (part->x[i] > grd->Lx){
if (param->PERIODICX==true){ // PERIODIC
part->x[i] = part->x[i] - grd->Lx;
} else { // REFLECTING BC
part->u[i] = -part->u[i];
part->x[i] = 2*grd->Lx - part->x[i];
}
}
if (part->x[i] < 0){
if (param->PERIODICX==true){ // PERIODIC
part->x[i] = part->x[i] + grd->Lx;
} else { // REFLECTING BC
part->u[i] = -part->u[i];
part->x[i] = -part->x[i];
}
}
// Y-DIRECTION: BC particles
if (part->y[i] > grd->Ly){
if (param->PERIODICY==true){ // PERIODIC
part->y[i] = part->y[i] - grd->Ly;
} else { // REFLECTING BC
part->v[i] = -part->v[i];
part->y[i] = 2*grd->Ly - part->y[i];
}
}
if (part->y[i] < 0){
if (param->PERIODICY==true){ // PERIODIC
part->y[i] = part->y[i] + grd->Ly;
} else { // REFLECTING BC
part->v[i] = -part->v[i];
part->y[i] = -part->y[i];
}
}
// Z-DIRECTION: BC particles
if (part->z[i] > grd->Lz){
if (param->PERIODICZ==true){ // PERIODIC
part->z[i] = part->z[i] - grd->Lz;
} else { // REFLECTING BC
part->w[i] = -part->w[i];
part->z[i] = 2*grd->Lz - part->z[i];
}
}
if (part->z[i] < 0){
if (param->PERIODICZ==true){ // PERIODIC
part->z[i] = part->z[i] + grd->Lz;
} else { // REFLECTING BC
part->w[i] = -part->w[i];
part->z[i] = -part->z[i];
}
}
} // end of one particle
} // end of subcycling
return(0); // exit successfully
} // end of the mover
/** Interpolation Particle --> Grid: This is for species */
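// For each particle, trilinear weights to the 8 surrounding nodes are built from its
// distances to those nodes; charge density, the current densities Jx/Jy/Jz and the six
// pressure-tensor components are then scattered (accumulated) onto those grid nodes.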
void interpP2G(struct particles* part, struct interpDensSpecies* ids, struct grid* grd)
{
// arrays needed for interpolation
FPpart weight[2][2][2];
FPpart temp[2][2][2];
FPpart xi[2], eta[2], zeta[2];
// index of the cell
int ix, iy, iz;
for (register long long i = 0; i < part->nop; i++) {
// determine cell: can we change to int()? is it faster?
ix = 2 + int (floor((part->x[i] - grd->xStart) * grd->invdx));
iy = 2 + int (floor((part->y[i] - grd->yStart) * grd->invdy));
iz = 2 + int (floor((part->z[i] - grd->zStart) * grd->invdz));
// distances from node
xi[0] = part->x[i] - grd->XN[ix - 1][iy][iz];
eta[0] = part->y[i] - grd->YN[ix][iy - 1][iz];
zeta[0] = part->z[i] - grd->ZN[ix][iy][iz - 1];
xi[1] = grd->XN[ix][iy][iz] - part->x[i];
eta[1] = grd->YN[ix][iy][iz] - part->y[i];
zeta[1] = grd->ZN[ix][iy][iz] - part->z[i];
// calculate the weights for different nodes
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
weight[ii][jj][kk] = part->q[i] * xi[ii] * eta[jj] * zeta[kk] * grd->invVOL;
//////////////////////////
// add charge density
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->rhon[ix - i][iy - j][iz - k] += weight[i][j][k] * grd->invVOL;
////////////////////////////
// add current density - Jx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->Jx[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
////////////////////////////
// add current density - Jy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->Jy[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
////////////////////////////
// add current density - Jz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->w[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->Jz[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
////////////////////////////
// add pressure pxx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->u[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->pxx[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
////////////////////////////
// add pressure pxy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->v[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->pxy[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
/////////////////////////////
// add pressure pxz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->w[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->pxz[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
/////////////////////////////
// add pressure pyy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * part->v[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->pyy[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
/////////////////////////////
// add pressure pyz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * part->w[i] * weight[ii][jj][kk];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
ids->pyz[ix - i][iy - j][iz - k] += temp[i][j][k] * grd->invVOL;
/////////////////////////////
// add pressure pzz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->w[i] * part->w[i] * weight[ii][jj][kk];
for (int i=0; i < 2; i++)
for (int j=0; j < 2; j++)
for(int k=0; k < 2; k++)
ids->pzz[ix -i][iy -j][iz - k] += temp[i][j][k] * grd->invVOL;
}
}
|
8b640d2dec5b6da9d1398e2e7bdd7acbfd430211.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Evaluate different timing methods.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <inttypes.h>
#include <hip/hip_runtime.h>
#include <windows.h>
#include <chrono>
using namespace std::chrono;
__global__ void vectorAdd(double *A, double *B, double *C, int numElements)
{
int tid = blockIdx.x*1024+threadIdx.x;
if(tid<numElements)
{
C[tid] = A[tid]+B[tid];
}
__syncthreads();
}
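// Note: the trailing __syncthreads() in vectorAdd is not required for correctness here,
// since each thread only writes its own element C[tid].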
__global__ void GPU_init()
{/* Warm up the GPU by launching an empty kernel */}
/**
* Host main routine
*/
const int N=20000000;
double a[N],b[N],c[N];
int main(int argc, char **argv)
{
// Warm up the GPU
hipLaunchKernelGGL(( GPU_init), dim3(1), dim3(1), 0, 0, );
srand((unsigned)time(NULL));
for(int i=0;i<N;i++)
{
a[i] = rand()%N;
b[i] = (rand()%N)*(-1);
}
for(int j=1;j<=11;j++)
{
hipDeviceSynchronize();
// Declare timing variables
LARGE_INTEGER nFreq;
LARGE_INTEGER nBeginTime,nEndTime;
double Qtime;
float elapsedTime = 0.0;
hipEvent_t event_start, event_stop;
clock_t clock_start;
clock_t clock_end;
std::chrono::time_point<std::chrono::high_resolution_clock> c11_start, c11_end;
DWORD t1,t2;
if(atoi(argv[1]) == 1) {
QueryPerformanceFrequency(&nFreq);
QueryPerformanceCounter(&nBeginTime);
} else if(atoi(argv[1]) == 2) {
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
hipEventRecord(event_start, 0);
} else if(atoi(argv[1]) == 3) {
clock_start = clock();
} else if(atoi(argv[1]) == 4) {
c11_start = high_resolution_clock::now();
} else if(atoi(argv[1]) == 5) {
t1 = GetTickCount();
}
double *dev_a , *dev_b, *dev_c;
hipMalloc( (void**)&dev_a, N*sizeof(double) );
hipMalloc( (void**)&dev_b, N*sizeof(double) );
hipMalloc( (void**)&dev_c, N*sizeof(double) );
// Timing methods (selected by argv[1])
/* vectorAdd code: memory allocation, initialization, copy-in, compute, copy-back, free. Data size 5000000 */
dim3 dimBlock(ceil((double)N/1024.0));
dim3 dimGrid(1024);
hipMemcpy( dev_a , a, N*sizeof(double), hipMemcpyHostToDevice ) ;
hipMemcpy( dev_b , b, N*sizeof(double), hipMemcpyHostToDevice ) ;
hipLaunchKernelGGL(( vectorAdd), dim3(dimBlock),dim3(dimGrid), 0, 0, dev_a,dev_b,dev_c,N);
hipMemcpy( c , dev_c , N*sizeof(double), hipMemcpyDeviceToHost) ;
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
if(atoi(argv[1]) == 1) {
// When using CPU-side timing, a synchronization call is required
hipDeviceSynchronize();
QueryPerformanceCounter(&nEndTime);
Qtime=(double)(nEndTime.QuadPart-nBeginTime.QuadPart)/(double)nFreq.QuadPart;
printf("QueryPerformanceCounter time = %fms\n", Qtime*1000);
} else if(atoi(argv[1]) == 2) {
hipDeviceSynchronize();
hipEventRecord(event_stop, 0);
hipEventSynchronize(event_stop);
hipEventElapsedTime(&elapsedTime, event_start, event_stop);
printf("cudaevent time = %lfms\n", elapsedTime);
} else if(atoi(argv[1]) == 3) {
hipDeviceSynchronize();
clock_end= clock();
double clock_diff_sec = ((double)(clock_end- clock_start) / CLOCKS_PER_SEC);
printf("clock_ time: %lfms.\n", clock_diff_sec * 1000);
}else if(atoi(argv[1]) == 4) {
hipDeviceSynchronize();
c11_end = high_resolution_clock::now();
double elapsed_seconds = std::chrono::duration_cast<std::chrono::nanoseconds>
(c11_end-c11_start).count();
printf("chrono time: %lfms.\n", elapsed_seconds/1000/1000);
} else if(atoi(argv[1]) == 5) {
t2 = GetTickCount();
printf("GetTick time: %lfms.\n", double(t2-t1));
}
//printf("done!\n");
}
return EXIT_SUCCESS;
}
|
8b640d2dec5b6da9d1398e2e7bdd7acbfd430211.cu
|
/*
Evaluate different timing methods.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <inttypes.h>
#include <cuda.h>
#include <windows.h>
#include <chrono>
using namespace std::chrono;
__global__ void vectorAdd(double *A, double *B, double *C, int numElements)
{
int tid = blockIdx.x*1024+threadIdx.x;
if(tid<numElements)
{
C[tid] = A[tid]+B[tid];
}
__syncthreads();
}
__global__ void GPU_init()
{/* Warm up the GPU by launching an empty kernel */}
/**
* Host main routine
*/
const int N=20000000;
double a[N],b[N],c[N];
int main(int argc, char **argv)
{
// Warm up the GPU
GPU_init<<<1, 1>>>();
srand((unsigned)time(NULL));
for(int i=0;i<N;i++)
{
a[i] = rand()%N;
b[i] = (rand()%N)*(-1);
}
for(int j=1;j<=11;j++)
{
cudaDeviceSynchronize();
// Declare timing variables
LARGE_INTEGER nFreq;
LARGE_INTEGER nBeginTime,nEndTime;
double Qtime;
float elapsedTime = 0.0;
cudaEvent_t event_start, event_stop;
clock_t clock_start;
clock_t clock_end;
std::chrono::time_point<std::chrono::high_resolution_clock> c11_start, c11_end;
DWORD t1,t2;
if(atoi(argv[1]) == 1) {
QueryPerformanceFrequency(&nFreq);
QueryPerformanceCounter(&nBeginTime);
} else if(atoi(argv[1]) == 2) {
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
cudaEventRecord(event_start, 0);
} else if(atoi(argv[1]) == 3) {
clock_start = clock();
} else if(atoi(argv[1]) == 4) {
c11_start = high_resolution_clock::now();
} else if(atoi(argv[1]) == 5) {
t1 = GetTickCount();
}
double *dev_a , *dev_b, *dev_c;
cudaMalloc( (void**)&dev_a, N*sizeof(double) );
cudaMalloc( (void**)&dev_b, N*sizeof(double) );
cudaMalloc( (void**)&dev_c, N*sizeof(double) );
// Timing methods (selected by argv[1])
/* vectorAdd code: memory allocation, initialization, copy-in, compute, copy-back, free. Data size 5000000 */
dim3 dimBlock(ceil((double)N/1024.0));
dim3 dimGrid(1024);
cudaMemcpy( dev_a , a, N*sizeof(double), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dev_b , b, N*sizeof(double), cudaMemcpyHostToDevice ) ;
vectorAdd<<<dimBlock,dimGrid>>>(dev_a,dev_b,dev_c,N);
cudaMemcpy( c , dev_c , N*sizeof(double), cudaMemcpyDeviceToHost) ;
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
if(atoi(argv[1]) == 1) {
// When using CPU-side timing, a synchronization call is required!!
cudaDeviceSynchronize();
QueryPerformanceCounter(&nEndTime);
Qtime=(double)(nEndTime.QuadPart-nBeginTime.QuadPart)/(double)nFreq.QuadPart;
printf("QueryPerformanceCounter time = %fms\n", Qtime*1000);
} else if(atoi(argv[1]) == 2) {
cudaDeviceSynchronize();
cudaEventRecord(event_stop, 0);
cudaEventSynchronize(event_stop);
cudaEventElapsedTime(&elapsedTime, event_start, event_stop);
printf("cudaevent time = %lfms\n", elapsedTime);
} else if(atoi(argv[1]) == 3) {
cudaDeviceSynchronize();
clock_end= clock();
double clock_diff_sec = ((double)(clock_end- clock_start) / CLOCKS_PER_SEC);
printf("clock_ time: %lfms.\n", clock_diff_sec * 1000);
}else if(atoi(argv[1]) == 4) {
cudaDeviceSynchronize();
c11_end = high_resolution_clock::now();
double elapsed_seconds = std::chrono::duration_cast<std::chrono::nanoseconds>
(c11_end-c11_start).count();
printf("chrono time: %lfms.\n", elapsed_seconds/1000/1000);
} else if(atoi(argv[1]) == 5) {
t2 = GetTickCount();
printf("GetTick time: %lfms.\n", double(t2-t1));
}
//printf("done!\n");
}
return EXIT_SUCCESS;
}
|
05373482fa598da9fbb443cae206c486fa8d07c0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../gpu_lib/header.h"
#include "../utils/header.h"
#include <cstdio>
#include <cstdlib>
namespace ftxj {
__device__ inline float __ReLU(float x) {
return x<0.0?0.0:x>32.0?32.0:x;
};
#define MINIBATCH 8
#define UNROLL 8
__global__ void n16384_l11_kernel(
float * __restrict__ A,
float * __restrict__ B,
float * __restrict__ C,
int* __restrict__ index,
int* __restrict__ index_len,
int* __restrict__ B_index,
int batch,
int neuron,
float bias) {
extern __shared__ float shared[];
int col_gropu = threadIdx.x / 16;
int last_load = ((neuron / 16) % 6) * 16 + 16 * 2;
int start_idx = index_len[blockIdx.y];
int load_num = index_len[blockIdx.y + 1] - index_len[blockIdx.y];
for(int n = threadIdx.x; n < load_num * MINIBATCH; n += blockDim.x){
int f = n / load_num;
int k = n % load_num;
shared[f * 160 + k] = A[(blockIdx.x * MINIBATCH + f) * neuron + index[start_idx + k]];
}
__syncthreads();
// if(blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0) {
// for(int i = 0; i < load_num; ++i) {
// printf("load %d\n", index[start_idx + i]);
// }
// printf("load %d %f %f\n", index[start_idx + 7], shared[7], A[index[start_idx + 7]]);
// }
float res[MINIBATCH] = {0.0};
for(int r = 0; r < 32; ++r) {
float val = B[(blockIdx.y * 128 * 32) + r * 128 + threadIdx.x];
int idx = B_index[blockIdx.y * 8 * 32 + r * 8 + (threadIdx.x / 16)];
for(int f = 0; f < MINIBATCH / UNROLL; ++f) {
res[0 + f * UNROLL] += shared[(f * UNROLL + 0) * 160 + idx] * val;
res[1 + f * UNROLL] += shared[(f * UNROLL + 1) * 160 + idx] * val;
res[2 + f * UNROLL] += shared[(f * UNROLL + 2) * 160 + idx] * val;
res[3 + f * UNROLL] += shared[(f * UNROLL + 3) * 160 + idx] * val;
res[4 + f * UNROLL] += shared[(f * UNROLL + 4) * 160 + idx] * val;
res[5 + f * UNROLL] += shared[(f * UNROLL + 5) * 160 + idx] * val;
res[6 + f * UNROLL] += shared[(f * UNROLL + 6) * 160 + idx] * val;
res[7 + f * UNROLL] += shared[(f * UNROLL + 7) * 160 + idx] * val;
}
}
for(int f = 0; f < MINIBATCH ; ++f) {
C[(blockIdx.x * MINIBATCH + f) * neuron + blockIdx.y * 128 + threadIdx.x] = res[f];
}
}
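// Each block computes a MINIBATCH x 128 tile of C: the load_num input columns listed in
// index[] are staged in shared memory (160 floats per mini-batch row), then each thread owns
// one of the 128 output columns and accumulates 32 weight entries, with the batch loop
// unrolled 8 ways.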
void test_benchmark_n16384_l11_kernel(
COOMatrix& coo,
std::vector<float> &B_val,
std::vector<int> &B_index,
std::vector<int> &A_row_access,
std::vector<int> &A_row_access_len,
int max_input_access,
int batch, int neuron,
GpuEnv &env) {
float *A;
float *B;
float *C;
int *index;
int* index_len;
int* B_index_d;
int mybatch = batch;
int bias = 0;
float * input = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(input, 0, sizeof(float) * neuron * mybatch);
float * output = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(output, 0, sizeof(float) * neuron * mybatch);
srand (static_cast <unsigned> (time(0)));
for(int i = 0; i < mybatch; ++i) {
for(int j = 0; j < neuron; ++j) {
float r2 = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX/32.0));
input[i * neuron + j] = r2;
}
}
float* W = (float*)malloc(sizeof(float) * B_val.size());
for(int i = 0; i < B_val.size(); ++i) {
W[i] = B_val[i];
}
int* W_idx = (int*)malloc(sizeof(int) * B_index.size());
for(int i = 0; i < B_index.size(); ++i) {
W_idx[i] = B_index[i];
}
int* access = (int*)malloc(sizeof(int) * A_row_access.size());
for(int i = 0; i < A_row_access.size(); ++i) {
access[i] = A_row_access[i];
}
int* access_len = (int*)malloc(sizeof(int) * A_row_access_len.size());
for(int i = 0; i < A_row_access_len.size(); ++i) {
access_len[i] = A_row_access_len[i];
}
Safe_Call(hipMalloc((void**)&A, sizeof(float) * neuron * mybatch));
Safe_Call(hipMemcpy(A, input, sizeof(float) * neuron * mybatch, hipMemcpyHostToDevice));
Safe_Call(hipMalloc((void**)&B, sizeof(float) * B_val.size()));
Safe_Call(hipMemcpy(B, W, sizeof(float) * B_val.size(), hipMemcpyHostToDevice));
Safe_Call(hipMalloc((void**)&C, sizeof(float) * neuron * mybatch));
Safe_Call(hipMemset(C, 0, sizeof(float) * neuron * mybatch));
Safe_Call(hipMalloc((void**)&index, sizeof(int) * A_row_access.size()));
Safe_Call(hipMemcpy(index, access, sizeof(int) * A_row_access.size(), hipMemcpyHostToDevice));
Safe_Call(hipMalloc((void**)&B_index_d, sizeof(float) * B_index.size()));
Safe_Call(hipMemcpy(B_index_d, W_idx, sizeof(float) * B_index.size(), hipMemcpyHostToDevice));
Safe_Call(hipMalloc((void**)&index_len, sizeof(float) * A_row_access_len.size()));
Safe_Call(hipMemcpy(index_len, access_len, sizeof(float) * A_row_access_len.size(), hipMemcpyHostToDevice));
env.add_event("row-succ-20-uiuc-kernel");
env.event_start_record("row-succ-20-uiuc-kernel");
int blocksize = 128;
dim3 block(blocksize);
dim3 grid((mybatch + MINIBATCH - 1) / MINIBATCH, neuron / blocksize);
hipLaunchKernelGGL(( n16384_l11_kernel), dim3(grid), dim3(block), sizeof(float) * (max_input_access * MINIBATCH), env.get_stream("row-succ-20-uiuc-kernel"),
A, B, C, index, index_len, B_index_d, batch, neuron, bias
);
env.event_stop_record("row-succ-20-uiuc-kernel");
float time = env.get_event_time("row-succ-20-uiuc-kernel");
Safe_Call(hipMemcpy(output, C, sizeof(float) * neuron * mybatch, hipMemcpyDeviceToHost));
std::cout << "Kernel Exec Time [20-uiuc-row-succ-transpose] = " << time << "ms" <<std::endl;
CpuSpmm::run_and_cmp(coo, input, neuron, mybatch, output, false, true, true);
}
};
|
05373482fa598da9fbb443cae206c486fa8d07c0.cu
|
#include <cuda.h>
#include "../gpu_lib/header.h"
#include "../utils/header.h"
#include <cstdio>
#include <cstdlib>
namespace ftxj {
__device__ inline float __ReLU(float x) {
return x<0.0?0.0:x>32.0?32.0:x;
};
#define MINIBATCH 8
#define UNROLL 8
__global__ void n16384_l11_kernel(
float * __restrict__ A,
float * __restrict__ B,
float * __restrict__ C,
int* __restrict__ index,
int* __restrict__ index_len,
int* __restrict__ B_index,
int batch,
int neuron,
float bias) {
extern __shared__ float shared[];
int col_gropu = threadIdx.x / 16;
int last_load = ((neuron / 16) % 6) * 16 + 16 * 2;
int start_idx = index_len[blockIdx.y];
int load_num = index_len[blockIdx.y + 1] - index_len[blockIdx.y];
for(int n = threadIdx.x; n < load_num * MINIBATCH; n += blockDim.x){
int f = n / load_num;
int k = n % load_num;
shared[f * 160 + k] = A[(blockIdx.x * MINIBATCH + f) * neuron + index[start_idx + k]];
}
__syncthreads();
// if(blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0) {
// for(int i = 0; i < load_num; ++i) {
// printf("load %d\n", index[start_idx + i]);
// }
// printf("load %d %f %f\n", index[start_idx + 7], shared[7], A[index[start_idx + 7]]);
// }
float res[MINIBATCH] = {0.0};
for(int r = 0; r < 32; ++r) {
float val = B[(blockIdx.y * 128 * 32) + r * 128 + threadIdx.x];
int idx = B_index[blockIdx.y * 8 * 32 + r * 8 + (threadIdx.x / 16)];
for(int f = 0; f < MINIBATCH / UNROLL; ++f) {
res[0 + f * UNROLL] += shared[(f * UNROLL + 0) * 160 + idx] * val;
res[1 + f * UNROLL] += shared[(f * UNROLL + 1) * 160 + idx] * val;
res[2 + f * UNROLL] += shared[(f * UNROLL + 2) * 160 + idx] * val;
res[3 + f * UNROLL] += shared[(f * UNROLL + 3) * 160 + idx] * val;
res[4 + f * UNROLL] += shared[(f * UNROLL + 4) * 160 + idx] * val;
res[5 + f * UNROLL] += shared[(f * UNROLL + 5) * 160 + idx] * val;
res[6 + f * UNROLL] += shared[(f * UNROLL + 6) * 160 + idx] * val;
res[7 + f * UNROLL] += shared[(f * UNROLL + 7) * 160 + idx] * val;
}
}
for(int f = 0; f < MINIBATCH ; ++f) {
C[(blockIdx.x * MINIBATCH + f) * neuron + blockIdx.y * 128 + threadIdx.x] = res[f];
}
}
void test_benchmark_n16384_l11_kernel(
COOMatrix& coo,
std::vector<float> &B_val,
std::vector<int> &B_index,
std::vector<int> &A_row_access,
std::vector<int> &A_row_access_len,
int max_input_access,
int batch, int neuron,
GpuEnv &env) {
float *A;
float *B;
float *C;
int *index;
int* index_len;
int* B_index_d;
int mybatch = batch;
int bias = 0;
float * input = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(input, 0, sizeof(float) * neuron * mybatch);
float * output = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(output, 0, sizeof(float) * neuron * mybatch);
srand (static_cast <unsigned> (time(0)));
for(int i = 0; i < mybatch; ++i) {
for(int j = 0; j < neuron; ++j) {
float r2 = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX/32.0));
input[i * neuron + j] = r2;
}
}
float* W = (float*)malloc(sizeof(float) * B_val.size());
for(int i = 0; i < B_val.size(); ++i) {
W[i] = B_val[i];
}
int* W_idx = (int*)malloc(sizeof(int) * B_index.size());
for(int i = 0; i < B_index.size(); ++i) {
W_idx[i] = B_index[i];
}
int* access = (int*)malloc(sizeof(int) * A_row_access.size());
for(int i = 0; i < A_row_access.size(); ++i) {
access[i] = A_row_access[i];
}
int* access_len = (int*)malloc(sizeof(int) * A_row_access_len.size());
for(int i = 0; i < A_row_access_len.size(); ++i) {
access_len[i] = A_row_access_len[i];
}
Safe_Call(cudaMalloc((void**)&A, sizeof(float) * neuron * mybatch));
Safe_Call(cudaMemcpy(A, input, sizeof(float) * neuron * mybatch, cudaMemcpyHostToDevice));
Safe_Call(cudaMalloc((void**)&B, sizeof(float) * B_val.size()));
Safe_Call(cudaMemcpy(B, W, sizeof(float) * B_val.size(), cudaMemcpyHostToDevice));
Safe_Call(cudaMalloc((void**)&C, sizeof(float) * neuron * mybatch));
Safe_Call(cudaMemset(C, 0, sizeof(float) * neuron * mybatch));
Safe_Call(cudaMalloc((void**)&index, sizeof(int) * A_row_access.size()));
Safe_Call(cudaMemcpy(index, access, sizeof(int) * A_row_access.size(), cudaMemcpyHostToDevice));
    Safe_Call(cudaMalloc((void**)&B_index_d, sizeof(int) * B_index.size()));
    Safe_Call(cudaMemcpy(B_index_d, W_idx, sizeof(int) * B_index.size(), cudaMemcpyHostToDevice));
    Safe_Call(cudaMalloc((void**)&index_len, sizeof(int) * A_row_access_len.size()));
    Safe_Call(cudaMemcpy(index_len, access_len, sizeof(int) * A_row_access_len.size(), cudaMemcpyHostToDevice));
env.add_event("row-succ-20-uiuc-kernel");
env.event_start_record("row-succ-20-uiuc-kernel");
int blocksize = 128;
dim3 block(blocksize);
dim3 grid((mybatch + MINIBATCH - 1) / MINIBATCH, neuron / blocksize);
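    // grid.x: one block per MINIBATCH input rows; grid.y: one block per 128 output neurons
    // (blocksize threads, one per output column). The dynamic shared-memory size covers the
    // largest staged input tile (max_input_access * MINIBATCH floats).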
n16384_l11_kernel<<<grid, block, sizeof(float) * (max_input_access * MINIBATCH), env.get_stream("row-succ-20-uiuc-kernel")>>>(
A, B, C, index, index_len, B_index_d, batch, neuron, bias
);
env.event_stop_record("row-succ-20-uiuc-kernel");
float time = env.get_event_time("row-succ-20-uiuc-kernel");
Safe_Call(cudaMemcpy(output, C, sizeof(float) * neuron * mybatch, cudaMemcpyDeviceToHost));
std::cout << "Kernel Exec Time [20-uiuc-row-succ-transpose] = " << time << "ms" <<std::endl;
CpuSpmm::run_and_cmp(coo, input, neuron, mybatch, output, false, true, true);
}
};
|
c070b48da28ccafac90dc3bb2fc081df3c6ff0db.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define ARRAYSIZE 32
//Note: __global__ functions are launched from the host and run on the device (such a function is called a kernel); the kernel's result lives in GPU memory and must be explicitly copied back to the host. A function with no execution-space qualifier runs on the host (by default).
//1.read in rows, write in columns
__global__ void row_col(int* odata, int* idata, int n) {
int i;
for (i = 0; i < n; i++) {
odata[i*n + blockIdx.x] = idata[blockIdx.x*n + i];
}
}
//2.read in columns and write in rows
__global__ void col_row(int* odata, int* idata, int n) {
int i;
for (i = 0; i < n; i++) {
odata[blockIdx.x*n + i] = idata[i*n + blockIdx.x];
}
}
//3.read in rows and write in columns + unroll 4 blocks
__global__ void row_col_unroll(int* odata, int* idata, int n) {
for (int i = 0; i < 4; i++) {
int x = blockIdx.x * 4 + i;
for (int j = 0; j < n; j++) {
odata[j*n + x] = idata[x*n + j];
}
}
}
//4.read in columns and write in rows + unroll 4 blocks
__global__ void col_row_unroll(int* odata, int* idata, int n) {
for (int i = 0; i < 4; i++) {
int x = blockIdx.x * 4 + i;
for (int j = 0; j < n; j++) {
odata[x*n + j] = idata[j*n + x];
}
}
}
//5.read in rows and write in columns + diagonal
__global__ void row_col_diag(float *odata, float *idata, int n) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < 4; j++) {
int x = i;
int y = (i + blockIdx.x * 4 + j) % n;
odata[y*n + x] = idata[x*n + y];
}
}
}
//6.read in columns and write in row + diagonal
__global__ void col_row_diag(int* odata, int* idata, int n) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < 4; j++) {
int x = i;
int y = (i + blockIdx.x * 4 + j) % n;
odata[y*n + x] = idata[x*n + y];
}
}
}
void print_matrix(int* h_tdata, int n) {
int i, j;
printf("print matrix of %dx%d\n", n, n);
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
printf("%d ", h_tdata[i*n + j]);
}
printf("\n");
}
}
int main(int argc, char **argv) {
int m_th, m_size;
if (argc > 2) {
m_th = atoi(argv[1]);
m_size = atoi(argv[2]);
}
int i, j;
const int n = m_size;
const int mem_size = n*n * sizeof(int);
//allocate memory for the matrix in host(including input and output)
int *h_idata = (int*)malloc(mem_size);
int *h_tdata = (int*)malloc(mem_size);
//allocate memory for the matrix in device(including input and output)
int *d_idata, *d_tdata;
hipMalloc(&d_idata, mem_size);
hipMalloc(&d_tdata, mem_size);
//produce the matrix for transposition
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
h_idata[i*n + j] = i*n + j;
}
}
/*-------------------------preparation------------------------------*/
hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice);//copy data from host to device(GPU)
//events for timing
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
float ms;
hipMemset(d_tdata, 0, mem_size);
/*-------------------implement different method-----------------------------*/
hipEventRecord(startEvent, 0);//start timing
dim3 dimGrid(n, 1);
dim3 dimBlock(1, 1);
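	// Note: every launch below uses a single thread per block; dimBlock is declared but not passed to the kernels.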
//to do
	switch (m_th) {
	case 1:
		row_col<<<dimGrid, 1>>>(d_tdata, d_idata, n); //<<<grid,block>>> grid: each column is a block
		break;
	case 2:
		col_row<<<dimGrid, 1>>>(d_tdata, d_idata, n);
		break;
	case 3:
		row_col_unroll<<<dimGrid, 1>>>(d_tdata, d_idata, n);
		break;
	case 4:
		col_row_unroll<<<dimGrid, 1>>>(d_tdata, d_idata, n);
		break;
	case 5:
		row_col_diag<<<dimGrid, 1>>>(d_tdata, d_idata, n);
		break;
	case 6:
		col_row_diag<<<dimGrid, 1>>>(d_tdata, d_idata, n);
	}
/*--------------------------------------------------------------------------*/
hipEventRecord(stopEvent, 0);//end timing
hipEventSynchronize(stopEvent);//stop timing
hipEventElapsedTime(&ms, startEvent, stopEvent);
hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost);//copy data from device(GPU) to host
/*print_matrix(h_idata, n);
print_matrix(h_tdata, n);*/
//calculate the elapsed time
printf("the elapsed time is:%.10f\n", ms);
/*------------------ending work:release memory in GPU and heap-----------------*/
hipEventDestroy(startEvent);
hipEventDestroy(stopEvent);
hipFree(d_tdata);
hipFree(d_idata);
free(h_idata);
free(h_tdata);
return 0;
}
|
c070b48da28ccafac90dc3bb2fc081df3c6ff0db.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define ARRAYSIZE 32
//Note: __global__ functions are launched from the host and run on the device (such a function is called a kernel); the kernel's result lives in GPU memory and must be explicitly copied back to the host. A function with no execution-space qualifier runs on the host (by default).
//1.read in rows, write in columns
__global__ void row_col(int* odata, int* idata, int n) {
int i;
for (i = 0; i < n; i++) {
odata[i*n + blockIdx.x] = idata[blockIdx.x*n + i];
}
}
//2.read in columns and write in rows
__global__ void col_row(int* odata, int* idata, int n) {
int i;
for (i = 0; i < n; i++) {
odata[blockIdx.x*n + i] = idata[i*n + blockIdx.x];
}
}
//3.read in rows and write in columns + unroll 4 blocks
__global__ void row_col_unroll(int* odata, int* idata, int n) {
for (int i = 0; i < 4; i++) {
int x = blockIdx.x * 4 + i;
for (int j = 0; j < n; j++) {
odata[j*n + x] = idata[x*n + j];
}
}
}
//4.read in columns and write in rows + unroll 4 blocks
__global__ void col_row_unroll(int* odata, int* idata, int n) {
for (int i = 0; i < 4; i++) {
int x = blockIdx.x * 4 + i;
for (int j = 0; j < n; j++) {
odata[x*n + j] = idata[j*n + x];
}
}
}
//5.read in rows and write in columns + diagonal
__global__ void row_col_diag(float *odata, float *idata, int n) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < 4; j++) {
int x = i;
int y = (i + blockIdx.x * 4 + j) % n;
odata[y*n + x] = idata[x*n + y];
}
}
}
//6.read in columns and write in row + diagonal
__global__ void col_row_diag(int* odata, int* idata, int n) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < 4; j++) {
int x = i;
int y = (i + blockIdx.x * 4 + j) % n;
odata[y*n + x] = idata[x*n + y];
}
}
}
void print_matrix(int* h_tdata, int n) {
int i, j;
printf("print matrix of %dx%d\n", n, n);
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
printf("%d ", h_tdata[i*n + j]);
}
printf("\n");
}
}
int main(int argc, char **argv) {
int m_th, m_size;
if (argc > 2) {
m_th = atoi(argv[1]);
m_size = atoi(argv[2]);
}
int i, j;
const int n = m_size;
const int mem_size = n*n * sizeof(int);
//allocate memory for the matrix in host(including input and output)
int *h_idata = (int*)malloc(mem_size);
int *h_tdata = (int*)malloc(mem_size);
//allocate memory for the matrix in device(including input and output)
int *d_idata, *d_tdata;
cudaMalloc(&d_idata, mem_size);
cudaMalloc(&d_tdata, mem_size);
//produce the matrix for transposition
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
h_idata[i*n + j] = i*n + j;
}
}
/*-------------------------preparation------------------------------*/
cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice);//copy data from host to device(GPU)
//events for timing
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
float ms;
cudaMemset(d_tdata, 0, mem_size);
/*-------------------implement different method-----------------------------*/
cudaEventRecord(startEvent, 0);//start timing
dim3 dimGrid(n, 1);
dim3 dimBlock(1, 1);
//to do
	switch (m_th) {
	case 1:
		row_col<<<dimGrid, 1>>>(d_tdata, d_idata, n); //<<<grid,block>>> grid: each column is a block
		break;
	case 2:
		col_row<<<dimGrid, 1>>>(d_tdata, d_idata, n);
		break;
	case 3:
		row_col_unroll<<<dimGrid, 1>>>(d_tdata, d_idata, n);
		break;
	case 4:
		col_row_unroll<<<dimGrid, 1>>>(d_tdata, d_idata, n);
		break;
	case 5:
		row_col_diag<<<dimGrid, 1>>>(d_tdata, d_idata, n);
		break;
	case 6:
		col_row_diag<<<dimGrid, 1>>>(d_tdata, d_idata, n);
	}
/*--------------------------------------------------------------------------*/
cudaEventRecord(stopEvent, 0);//end timing
cudaEventSynchronize(stopEvent);//stop timing
cudaEventElapsedTime(&ms, startEvent, stopEvent);
cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost);//copy data from device(GPU) to host
/*print_matrix(h_idata, n);
print_matrix(h_tdata, n);*/
//calculate the elapsed time
printf("the elapsed time is:%.10f\n", ms);
/*------------------ending work:release memory in GPU and heap-----------------*/
cudaEventDestroy(startEvent);
cudaEventDestroy(stopEvent);
cudaFree(d_tdata);
cudaFree(d_idata);
free(h_idata);
free(h_tdata);
return 0;
}
|
f4a4ac64111d6c8843f9678bc1be7632bc4e1e86.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include "..\configuration.h"
__device__ double kernel_function_gpu(double dis, double h)
{
double R = dis / h;
double alpha;
if (CASE_DIM == 0) alpha = (1.0 / (h * h)) * pow(PI, -1.0);
if (CASE_DIM == 1) alpha = (1.0 / (h * h * h)) * pow(PI, -1.5);
double ans = alpha * exp(-(R * R));
return ans;
}
__device__ double kernel_function_1dev_gpu(double dis, double h)
{
double R = dis / h;
double alpha;
if (CASE_DIM == 0) alpha = (1.0 / (h * h)) * pow(3.14, -1.0);
if (CASE_DIM == 1) alpha = (1.0 / (h * h * h)) * pow(3.14, -1.5);
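    // Note: this kernel uses the literal 3.14 where the other two kernels use PI.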
double ans = alpha * exp(-(R * R)) * (-2.0 * R);
return ans / h;
}
__device__ double kernel_function_2dev_gpu(double dis, double h)
{
double R = dis / h;
double alpha;
if (CASE_DIM == 0) alpha = (1.0 / (h * h)) * pow(PI, -1.0);
if (CASE_DIM == 1) alpha = (1.0 / (h * h * h)) * pow(PI, -1.5);
double ans = -2.0 * alpha * (1 - 2 * R * R) * exp(-(R * R));
    return ans / h / h; // second derivative w.r.t. the local particle's position coordinate, so the derivative of R (a factor of 1/h) enters twice.
}
|
f4a4ac64111d6c8843f9678bc1be7632bc4e1e86.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include "..\configuration.h"
__device__ double kernel_function_gpu(double dis, double h)
{
double R = dis / h;
double alpha;
if (CASE_DIM == 0) alpha = (1.0 / (h * h)) * pow(PI, -1.0);
if (CASE_DIM == 1) alpha = (1.0 / (h * h * h)) * pow(PI, -1.5);
double ans = alpha * exp(-(R * R));
return ans;
}
__device__ double kernel_function_1dev_gpu(double dis, double h)
{
double R = dis / h;
double alpha;
if (CASE_DIM == 0) alpha = (1.0 / (h * h)) * pow(3.14, -1.0);
if (CASE_DIM == 1) alpha = (1.0 / (h * h * h)) * pow(3.14, -1.5);
double ans = alpha * exp(-(R * R)) * (-2.0 * R);
return ans / h;
}
__device__ double kernel_function_2dev_gpu(double dis, double h)
{
double R = dis / h;
double alpha;
if (CASE_DIM == 0) alpha = (1.0 / (h * h)) * pow(PI, -1.0);
if (CASE_DIM == 1) alpha = (1.0 / (h * h * h)) * pow(PI, -1.5);
double ans = -2.0 * alpha * (1 - 2 * R * R) * exp(-(R * R));
    return ans / h / h; // second derivative w.r.t. the local particle's position coordinate, so the derivative of R (a factor of 1/h) enters twice.
}
|
88a5019a1eb88220a875b398cbbd28a5d8f6f159.hip
|
// !!! This is a file automatically generated by hipify!!!
// This is from Eigen unsupported/test/cxx11_tensor_cuda.cu
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cuda_reduction
#define EIGEN_USE_GPU
#include <unsupported/Eigen/CXX11/Tensor>
#include "main.h"
#include <iostream>
using Eigen::Tensor;
void test_cuda_reduction()
{
Tensor<float, 4> in1(72,53,97,113);
Tensor<float, 2> out(72,97);
in1.setRandom();
std::size_t in1_bytes = in1.size() * sizeof(float);
std::size_t out_bytes = out.size() * sizeof(float);
float* d_in1;
float* d_out;
hipMalloc((void**)(&d_in1), in1_bytes);
hipMalloc((void**)(&d_out), out_bytes);
hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 72,53,97,113);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
array<Eigen::DenseIndex, 2> reduction_axis;
reduction_axis[0] = 1;
reduction_axis[1] = 3;
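  // Sum over dimensions 1 and 3 (extents 53 and 113), producing the 72x97 output tensor.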
gpu_out.device(gpu_device) = gpu_in1.sum(reduction_axis);
assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
for (int i = 0; i < 72; ++i) {
for (int j = 0; j < 97; ++j) {
float sum = 0;
for (int k = 0; k < 53; ++k) {
for (int l = 0; l < 113; ++l) {
sum += in1(i, k, j, l);
}
}
VERIFY_IS_APPROX(out(i,j), sum);
}
}
hipFree(d_in1);
hipFree(d_out);
}
|
88a5019a1eb88220a875b398cbbd28a5d8f6f159.cu
|
// This is from Eigen unsupported/test/cxx11_tensor_cuda.cu
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cuda_reduction
#define EIGEN_USE_GPU
#include <unsupported/Eigen/CXX11/Tensor>
#include "main.h"
#include <iostream>
using Eigen::Tensor;
void test_cuda_reduction()
{
Tensor<float, 4> in1(72,53,97,113);
Tensor<float, 2> out(72,97);
in1.setRandom();
std::size_t in1_bytes = in1.size() * sizeof(float);
std::size_t out_bytes = out.size() * sizeof(float);
float* d_in1;
float* d_out;
cudaMalloc((void**)(&d_in1), in1_bytes);
cudaMalloc((void**)(&d_out), out_bytes);
cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 72,53,97,113);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
array<Eigen::DenseIndex, 2> reduction_axis;
reduction_axis[0] = 1;
reduction_axis[1] = 3;
gpu_out.device(gpu_device) = gpu_in1.sum(reduction_axis);
assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
for (int i = 0; i < 72; ++i) {
for (int j = 0; j < 97; ++j) {
float sum = 0;
for (int k = 0; k < 53; ++k) {
for (int l = 0; l < 113; ++l) {
sum += in1(i, k, j, l);
}
}
VERIFY_IS_APPROX(out(i,j), sum);
}
}
cudaFree(d_in1);
cudaFree(d_out);
}
|
1fee5ce1afe3d849bf858d5cba9e38835cd6e124.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <hiprand/hiprand_kernel.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <thrust/device_vector.h>
const int COUNT_OF_THREADS = 128;
const int COUNT_OF_DOTS = 10000000;
const double a = 0;
const double b = M_PI / 2.;
const double yMin = 0;
const double yMax = 1;
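// fI(): exact value of the integral of cos(x) over [a, b], used as the reference result below.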
double fI() { return sin(b) - sin(a); }
__global__ void mcKernel(const int countOfIterations, int* counts, const double a, const double b, const double yMin, const double yMax){
int i = threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(i, 0, 0, &state);
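    // Each thread draws its own sequence of (x, y) points and counts those falling below cos();
    // the per-thread hit counts are summed afterwards with thrust::reduce.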
for (int j = 0; j < countOfIterations; j++)
{
double2 dot = hiprand_uniform2_double(&state);
if (dot.y * yMax - yMin <= cos(dot.x * b - a))
counts[i]++;
}
}
int main()
{
hipSetDevice(0);
thrust::device_vector<int> counts(COUNT_OF_THREADS);
int* rawCounts = thrust::raw_pointer_cast(counts.data());
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventRecord(start, 0);
dim3 threadsPerBlock(COUNT_OF_THREADS);
	mcKernel<<<1, threadsPerBlock>>>(COUNT_OF_DOTS / COUNT_OF_THREADS, rawCounts, a, b, yMin, yMax);
int countOfInnerPoints = thrust::reduce(counts.begin(), counts.end(), (int)0, thrust::plus<int>());
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Calculated integral: %f;\nPrecise integral: %f.\n", (b - a) * (yMax - yMin) * countOfInnerPoints / COUNT_OF_DOTS, fI());
printf("Time of calculation: %f seconds\n", elapsedTime / 1000);
hipDeviceReset();
system("pause");
return 0;
}
|
1fee5ce1afe3d849bf858d5cba9e38835cd6e124.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <curand_kernel.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <thrust/device_vector.h>
const int COUNT_OF_THREADS = 128;
const int COUNT_OF_DOTS = 10000000;
const double a = 0;
const double b = M_PI / 2.;
const double yMin = 0;
const double yMax = 1;
double fI() { return sin(b) - sin(a); }
__global__ void mcKernel(const int countOfIterations, int* counts, const double a, const double b, const double yMin, const double yMax){
int i = threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(i, 0, 0, &state);
for (int j = 0; j < countOfIterations; j++)
{
double2 dot = curand_uniform2_double(&state);
if (dot.y * yMax - yMin <= cos(dot.x * b - a))
counts[i]++;
}
}
int main()
{
cudaSetDevice(0);
thrust::device_vector<int> counts(COUNT_OF_THREADS);
int* rawCounts = thrust::raw_pointer_cast(counts.data());
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
dim3 threadsPerBlock(COUNT_OF_THREADS);
	mcKernel<<<1, threadsPerBlock>>>(COUNT_OF_DOTS / COUNT_OF_THREADS, rawCounts, a, b, yMin, yMax);
int countOfInnerPoints = thrust::reduce(counts.begin(), counts.end(), (int)0, thrust::plus<int>());
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Calculated integral: %f;\nPrecise integral: %f.\n", (b - a) * (yMax - yMin) * countOfInnerPoints / COUNT_OF_DOTS, fI());
printf("Time of calculation: %f seconds\n", elapsedTime / 1000);
cudaDeviceReset();
system("pause");
return 0;
}
|
bd03ddc899bd95274c34416846990f9c11373cd6.hip
|
// !!! This is a file automatically generated by hipify!!!
/** \file Substep3.cu : implements the kernel for the substep3 procedure
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
// choose only one of them (see, Stone & Norman 1992)
//#define COOLING_PREDCOR // predictor corrector (used in FARGO_ADSG)
#define COOLING_IMPLICIT // implicit version (used in FARGO3D)
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_SUBSTEP2
#define BLOCK_X 64
// BLOCK_Y : in radius
#define BLOCK_Y 4
__device__ double CRadiiStuff[32768];
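// All per-ring radial profiles (geometry, viscosity, and the azimuthally averaged Sigma, Energy,
// CoolingTime and Qplus fields) are packed into this single device buffer; the macros below
// address the individual slices by offset.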
#define invdiffrmed CRadiiStuff[ ig]
#define cs2 CRadiiStuff[(nr+1)*1 + ig]
#define invrmed CRadiiStuff[(nr+1)*2 + ig]
#define invrmedm CRadiiStuff[(nr+1)*2 + ig-1]
#define invrinf CRadiiStuff[(nr+1)*3 + ig]
#define rinf CRadiiStuff[(nr+1)*4 + ig]
#define rmed CRadiiStuff[(nr+1)*6 + ig]
#define rmed_p CRadiiStuff[(nr+1)*6 + ig +1]
#define rmed_2p CRadiiStuff[(nr+1)*6 + ig +2]
#define rmedm CRadiiStuff[(nr+1)*6 + ig-1]
#define rsup CRadiiStuff[(nr+1)*8 + ig]
#define invdiffrsup CRadiiStuff[(nr+1)*10+ ig]
#define visco CRadiiStuff[(nr+1)*12+ ig]
#define visco_p CRadiiStuff[(nr+1)*12+ ig + 1]
#define visco_2p CRadiiStuff[(nr+1)*12+ ig + 2]
#define SigmaMed_gpu CRadiiStuff[(nr+1)*14 + ig]
#define EnergyMed_gpu CRadiiStuff[(nr+1)*14 + nr + ig]
#define CoolingTimeMed_gpu CRadiiStuff[(nr+1)*14 + nr*2 + ig]
#define QplusMed_gpu CRadiiStuff[(nr+1)*14 + nr*3 + ig]
__global__ void kernel_substep3 (double *dens,
double *vrad,
double *vtheta,
double *energy,
double *energynew,
double *viscosity,
double *tau_rr,
double *tau_rp,
double *tau_pp,
double adiabatic_index,
bool alpha_viscosity,
bool cooling,
bool visc_heating,
int ns,
int nr,
int pitch,
double invdphi,
double dt) {
// jg & ig, g like 'global' (global memory <=> full grid)
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int idg = jg + ig * pitch;
// viscous heating
double div_v = 0.0;
// if (ig < nr-1)
// div_v = divergence_vel[idg];
int jgp = jg + 1;
if (jg == ns-1) jgp = 0;
int idgjp = jgp + ig * pitch;
if (ig < nr-1) {
div_v = ((vrad[idg+pitch]*rsup - vrad[idg]*rinf)*invdiffrsup + (vtheta[idgjp]-vtheta[idg])*invdphi)*invrmed;
}
else
div_v = ((vrad[idg]*rsup - vrad[idg-pitch]*rinf)*invdiffrsup + (vtheta[idgjp]-vtheta[idg])*invdphi)*invrmed;
double qplus = 0;
if (visc_heating) {
double nu, nu_p, nu_2p;
if (alpha_viscosity) {
nu = viscosity[idg];
nu_p = viscosity[idg+pitch];
nu_2p = viscosity[idg+2*pitch];
}
else {
nu = visco;
nu_p = visco_p;
nu_2p = visco_2p;
}
if (ig > 0) {
qplus = 0.5/nu/dens[idg]*(tau_rr[idg]*tau_rr[idg] + tau_rp[idg]*tau_rp[idg] + tau_pp[idg]*tau_pp[idg] );
qplus += (2.0/9.0)*nu*dens[idg]*div_v*div_v;
}
else {
int idgp = idg+ns;
int idg2p = idgp+ns;
double qpip = 0.5/nu_p/dens[idgp]*(tau_rr[idgp]*tau_rr[idgp] + tau_rp[idgp]*tau_rp[idgp] + tau_pp[idgp]*tau_pp[idgp] );
qpip += (2.0/9.0)*nu_p*dens[idgp]*div_v*div_v;
double qpi2p = 0.5/nu_2p/dens[idg2p]*(tau_rr[idg2p]*tau_rr[idg2p] + tau_rp[idg2p]*tau_rp[idg2p] + tau_pp[idg2p]*tau_pp[idg2p] );
qpi2p += (2.0/9.0)*nu_2p*dens[idg2p]*div_v*div_v;
qplus = qpip*exp( log(qpip/qpi2p) * log(rmed/rmed_p) / log(rmed_p/rmed_2p));
}
}
// cooling
if (cooling) {
#ifdef COOLING_PREDCOR // implemented in Fargo_ADSG
double num = EnergyMed_gpu*dt*dens[idg]/SigmaMed_gpu + CoolingTimeMed_gpu*energy[idg] + 0*dt*CoolingTimeMed_gpu*(qplus-QplusMed_gpu*dens[idg]/SigmaMed_gpu);
double den = dt + CoolingTimeMed_gpu + (adiabatic_index-1.0)*dt*CoolingTimeMed_gpu*div_v;
energynew[idg] = num/den;
#endif
#ifdef COOLING_IMPLICIT
const double term = 0.5*dt*(adiabatic_index-1.0)*div_v;
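    // Semi-implicit update: the compressional (gamma-1)*div(v) term is split half explicit /
    // half implicit, while viscous heating and the relaxation-type cooling enter explicitly.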
if (visc_heating)
qplus -= QplusMed_gpu*dens[idg]/SigmaMed_gpu;
double qminus = (1.0/CoolingTimeMed_gpu) * (energy[idg] - EnergyMed_gpu*(dens[idg]/SigmaMed_gpu));
energynew[idg] = (energy[idg]*(1.0-term) + dt * (qplus - qminus))/(1.0+term);
#endif
}
else {
#ifdef COOLING_PREDCOR // implemented in Fargo_ADSG
double num = dt*qplus + energy[idg];
double den = 1.0+(adiabatic_index-1.0) * dt *div_v;
energynew[idg] = num/den;
#endif
#ifdef COOLING_IMPLICIT // implemented in Fargo3D
const double term = 0.5*dt*(adiabatic_index-1.0)*div_v;
energynew[idg] = (energy[idg]*(1.0-term) + dt*qplus)/(1.0+term);
#endif
}
}
void SubStep3_gpu (PolarGrid *Vrad, PolarGrid *Vtheta, PolarGrid *Rho, PolarGrid *Energy, double dt,
PolarGrid *Energy_ret) {
int nr, ns;
nr = Rho->Nrad;
ns = Rho->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
double *TauRR_gpu_field = NULL;
double *TauRP_gpu_field = NULL;
double *TauPP_gpu_field = NULL;
if (Energy != NULL && ViscHeating) {
TauRR_gpu_field = TauRR->gpu_field;
TauRP_gpu_field = TauRP->gpu_field;
TauPP_gpu_field = TauPP->gpu_field;
}
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *) RadiiStuff, (size_t)(14*(nr+1))*sizeof(double), 0, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *) SigmaMed, (size_t)(nr)*sizeof(double), (14*(nr+1) )*sizeof(double), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *) EnergyMed, (size_t)(nr)*sizeof(double), (14*(nr+1) + nr)*sizeof(double), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *) CoolingTimeMed, (size_t)(nr)*sizeof(double), (14*(nr+1) + 2*nr)*sizeof(double), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *) QplusMed, (size_t)(nr)*sizeof(double), (14*(nr+1) + 3*nr)*sizeof(double), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_substep3) , dim3(grid), dim3(block) , 0, 0, Rho->gpu_field,
Vrad->gpu_field,
Vtheta->gpu_field,
Energy->gpu_field,
Energy_ret->gpu_field,
Viscosity->gpu_field,
TauRR_gpu_field,
TauRP_gpu_field,
TauPP_gpu_field,
ADIABATICINDEX,
ViscosityAlpha,
Cooling,
ViscHeating,
ns,
nr,
Energy->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt);
hipDeviceSynchronize();
getLastCudaError ("kernel_substep3 failed");
/*
if (Cooling) {
if (ViscHeating) {
kernel_substep3_cooling_vischeating <<< grid, block >>> (Rho->gpu_field,
Vrad->gpu_field,
Vtheta->gpu_field,
EnergyInt->gpu_field,
Energy->gpu_field,
Viscosity->gpu_field,
TauRR->gpu_field,
TauRP->gpu_field,
TauPP->gpu_field,
ADIABATICINDEX,
ns,
nr,
Energy->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt);
hipDeviceSynchronize();
getLastCudaError ("kernel_substep3_cooling_vischeating failed");
}
else {
kernel_substep3_cooling <<< grid, block >>> (Rho->gpu_field,
Vrad->gpu_field,
Vtheta->gpu_field,
EnergyInt->gpu_field,
Energy->gpu_field,
ADIABATICINDEX,
ns,
nr,
Energy->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt);
hipDeviceSynchronize();
getLastCudaError ("kernel_substep3_cooling failed");
}
}
else {
if (ViscHeating) {
kernel_substep3_nocooling_vischeating <<< grid, block >>> (Rho->gpu_field,
Vrad->gpu_field,
Vtheta->gpu_field,
EnergyInt->gpu_field,
Energy->gpu_field,
Viscosity->gpu_field,
TauRR->gpu_field,
TauRP->gpu_field,
TauPP->gpu_field,
ADIABATICINDEX,
ns,
nr,
Energy->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt);
hipDeviceSynchronize();
getLastCudaError ("kernel_substep3_nocooling failed");
}
else {
kernel_substep3_nocooling <<< grid, block >>> (Rho->gpu_field,
Vrad->gpu_field,
Vtheta->gpu_field,
EnergyInt->gpu_field,
Energy->gpu_field,
ADIABATICINDEX,
ns,
nr,
Energy->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt);
hipDeviceSynchronize();
getLastCudaError ("kernel_substep3_nocooling failed");
}
}*/
}
|
bd03ddc899bd95274c34416846990f9c11373cd6.cu
|
/** \file Substep3.cu : implements the kernel for the substep3 procedure
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <cuda.h>
// choose only one of them (see, Stone & Norman 1992)
//#define COOLING_PREDCOR // predictor corrector (used in FARGO_ADSG)
#define COOLING_IMPLICIT // implicit version (used in FARGO3D)
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_SUBSTEP2
#define BLOCK_X 64
// BLOCK_Y : in radius
#define BLOCK_Y 4
__device__ double CRadiiStuff[32768];
#define invdiffrmed CRadiiStuff[ ig]
#define cs2 CRadiiStuff[(nr+1)*1 + ig]
#define invrmed CRadiiStuff[(nr+1)*2 + ig]
#define invrmedm CRadiiStuff[(nr+1)*2 + ig-1]
#define invrinf CRadiiStuff[(nr+1)*3 + ig]
#define rinf CRadiiStuff[(nr+1)*4 + ig]
#define rmed CRadiiStuff[(nr+1)*6 + ig]
#define rmed_p CRadiiStuff[(nr+1)*6 + ig +1]
#define rmed_2p CRadiiStuff[(nr+1)*6 + ig +2]
#define rmedm CRadiiStuff[(nr+1)*6 + ig-1]
#define rsup CRadiiStuff[(nr+1)*8 + ig]
#define invdiffrsup CRadiiStuff[(nr+1)*10+ ig]
#define visco CRadiiStuff[(nr+1)*12+ ig]
#define visco_p CRadiiStuff[(nr+1)*12+ ig + 1]
#define visco_2p CRadiiStuff[(nr+1)*12+ ig + 2]
#define SigmaMed_gpu CRadiiStuff[(nr+1)*14 + ig]
#define EnergyMed_gpu CRadiiStuff[(nr+1)*14 + nr + ig]
#define CoolingTimeMed_gpu CRadiiStuff[(nr+1)*14 + nr*2 + ig]
#define QplusMed_gpu CRadiiStuff[(nr+1)*14 + nr*3 + ig]
__global__ void kernel_substep3 (double *dens,
double *vrad,
double *vtheta,
double *energy,
double *energynew,
double *viscosity,
double *tau_rr,
double *tau_rp,
double *tau_pp,
double adiabatic_index,
bool alpha_viscosity,
bool cooling,
bool visc_heating,
int ns,
int nr,
int pitch,
double invdphi,
double dt) {
// jg & ig, g like 'global' (global memory <=> full grid)
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int idg = jg + ig * pitch;
// viscous heating
double div_v = 0.0;
// if (ig < nr-1)
// div_v = divergence_vel[idg];
int jgp = jg + 1;
if (jg == ns-1) jgp = 0;
int idgjp = jgp + ig * pitch;
if (ig < nr-1) {
div_v = ((vrad[idg+pitch]*rsup - vrad[idg]*rinf)*invdiffrsup + (vtheta[idgjp]-vtheta[idg])*invdphi)*invrmed;
}
else
div_v = ((vrad[idg]*rsup - vrad[idg-pitch]*rinf)*invdiffrsup + (vtheta[idgjp]-vtheta[idg])*invdphi)*invrmed;
double qplus = 0;
if (visc_heating) {
double nu, nu_p, nu_2p;
if (alpha_viscosity) {
nu = viscosity[idg];
nu_p = viscosity[idg+pitch];
nu_2p = viscosity[idg+2*pitch];
}
else {
nu = visco;
nu_p = visco_p;
nu_2p = visco_2p;
}
if (ig > 0) {
qplus = 0.5/nu/dens[idg]*(tau_rr[idg]*tau_rr[idg] + tau_rp[idg]*tau_rp[idg] + tau_pp[idg]*tau_pp[idg] );
qplus += (2.0/9.0)*nu*dens[idg]*div_v*div_v;
}
else {
int idgp = idg+ns;
int idg2p = idgp+ns;
double qpip = 0.5/nu_p/dens[idgp]*(tau_rr[idgp]*tau_rr[idgp] + tau_rp[idgp]*tau_rp[idgp] + tau_pp[idgp]*tau_pp[idgp] );
qpip += (2.0/9.0)*nu_p*dens[idgp]*div_v*div_v;
double qpi2p = 0.5/nu_2p/dens[idg2p]*(tau_rr[idg2p]*tau_rr[idg2p] + tau_rp[idg2p]*tau_rp[idg2p] + tau_pp[idg2p]*tau_pp[idg2p] );
qpi2p += (2.0/9.0)*nu_2p*dens[idg2p]*div_v*div_v;
qplus = qpip*exp( log(qpip/qpi2p) * log(rmed/rmed_p) / log(rmed_p/rmed_2p));
}
}
// cooling
if (cooling) {
#ifdef COOLING_PREDCOR // implemented in Fargo_ADSG
double num = EnergyMed_gpu*dt*dens[idg]/SigmaMed_gpu + CoolingTimeMed_gpu*energy[idg] + 0*dt*CoolingTimeMed_gpu*(qplus-QplusMed_gpu*dens[idg]/SigmaMed_gpu);
double den = dt + CoolingTimeMed_gpu + (adiabatic_index-1.0)*dt*CoolingTimeMed_gpu*div_v;
energynew[idg] = num/den;
#endif
#ifdef COOLING_IMPLICIT
const double term = 0.5*dt*(adiabatic_index-1.0)*div_v;
if (visc_heating)
qplus -= QplusMed_gpu*dens[idg]/SigmaMed_gpu;
double qminus = (1.0/CoolingTimeMed_gpu) * (energy[idg] - EnergyMed_gpu*(dens[idg]/SigmaMed_gpu));
energynew[idg] = (energy[idg]*(1.0-term) + dt * (qplus - qminus))/(1.0+term);
#endif
}
else {
#ifdef COOLING_PREDCOR // implemented in Fargo_ADSG
double num = dt*qplus + energy[idg];
double den = 1.0+(adiabatic_index-1.0) * dt *div_v;
energynew[idg] = num/den;
#endif
#ifdef COOLING_IMPLICIT // implemented in Fargo3D
const double term = 0.5*dt*(adiabatic_index-1.0)*div_v;
energynew[idg] = (energy[idg]*(1.0-term) + dt*qplus)/(1.0+term);
#endif
}
}
void SubStep3_gpu (PolarGrid *Vrad, PolarGrid *Vtheta, PolarGrid *Rho, PolarGrid *Energy, double dt,
PolarGrid *Energy_ret) {
int nr, ns;
nr = Rho->Nrad;
ns = Rho->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
double *TauRR_gpu_field = NULL;
double *TauRP_gpu_field = NULL;
double *TauPP_gpu_field = NULL;
if (Energy != NULL && ViscHeating) {
TauRR_gpu_field = TauRR->gpu_field;
TauRP_gpu_field = TauRP->gpu_field;
TauPP_gpu_field = TauPP->gpu_field;
}
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *) RadiiStuff, (size_t)(14*(nr+1))*sizeof(double), 0, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *) SigmaMed, (size_t)(nr)*sizeof(double), (14*(nr+1) )*sizeof(double), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *) EnergyMed, (size_t)(nr)*sizeof(double), (14*(nr+1) + nr)*sizeof(double), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *) CoolingTimeMed, (size_t)(nr)*sizeof(double), (14*(nr+1) + 2*nr)*sizeof(double), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *) QplusMed, (size_t)(nr)*sizeof(double), (14*(nr+1) + 3*nr)*sizeof(double), cudaMemcpyHostToDevice));
kernel_substep3 <<< grid, block >>> (Rho->gpu_field,
Vrad->gpu_field,
Vtheta->gpu_field,
Energy->gpu_field,
Energy_ret->gpu_field,
Viscosity->gpu_field,
TauRR_gpu_field,
TauRP_gpu_field,
TauPP_gpu_field,
ADIABATICINDEX,
ViscosityAlpha,
Cooling,
ViscHeating,
ns,
nr,
Energy->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt);
cudaThreadSynchronize();
getLastCudaError ("kernel_substep3 failed");
/*
if (Cooling) {
if (ViscHeating) {
kernel_substep3_cooling_vischeating <<< grid, block >>> (Rho->gpu_field,
Vrad->gpu_field,
Vtheta->gpu_field,
EnergyInt->gpu_field,
Energy->gpu_field,
Viscosity->gpu_field,
TauRR->gpu_field,
TauRP->gpu_field,
TauPP->gpu_field,
ADIABATICINDEX,
ns,
nr,
Energy->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt);
cudaThreadSynchronize();
getLastCudaError ("kernel_substep3_cooling_vischeating failed");
}
else {
kernel_substep3_cooling <<< grid, block >>> (Rho->gpu_field,
Vrad->gpu_field,
Vtheta->gpu_field,
EnergyInt->gpu_field,
Energy->gpu_field,
ADIABATICINDEX,
ns,
nr,
Energy->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt);
cudaThreadSynchronize();
getLastCudaError ("kernel_substep3_cooling failed");
}
}
else {
if (ViscHeating) {
kernel_substep3_nocooling_vischeating <<< grid, block >>> (Rho->gpu_field,
Vrad->gpu_field,
Vtheta->gpu_field,
EnergyInt->gpu_field,
Energy->gpu_field,
Viscosity->gpu_field,
TauRR->gpu_field,
TauRP->gpu_field,
TauPP->gpu_field,
ADIABATICINDEX,
ns,
nr,
Energy->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt);
cudaThreadSynchronize();
getLastCudaError ("kernel_substep3_nocooling failed");
}
else {
kernel_substep3_nocooling <<< grid, block >>> (Rho->gpu_field,
Vrad->gpu_field,
Vtheta->gpu_field,
EnergyInt->gpu_field,
Energy->gpu_field,
ADIABATICINDEX,
ns,
nr,
Energy->pitch/sizeof(double),
(double)(Rho->Nsec)/2.0/M_PI,
dt);
cudaThreadSynchronize();
getLastCudaError ("kernel_substep3_nocooling failed");
}
}*/
}
|
071b3444374679d124f75ef6069c008a4f449d1e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
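// Forward declarations of thin typed wrappers around MAGMA routines; each template is
// specialized below for float, double, and their complex counterparts.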
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
double *A, magma_int_t lda,
double *w,
double *VL, magma_int_t ldvl,
double *VR, magma_int_t ldvr,
double *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
// magma [sd]geev wants separate output arrays: wr and wi for the real
// and imaginary parts
double *wr = w;
double *wi = w + n;
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
float *A, magma_int_t lda,
float *w,
float *VL, magma_int_t ldvl,
float *VR, magma_int_t ldvr,
float *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
float *wr = w;
float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<double> *A, magma_int_t lda,
c10::complex<double> *w,
c10::complex<double> *VL, magma_int_t ldvl,
c10::complex<double> *VR, magma_int_t ldvr,
c10::complex<double> *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_zgeev(jobvl, jobvr, n,
reinterpret_cast<magmaDoubleComplex*>(A), lda,
reinterpret_cast<magmaDoubleComplex*>(w),
reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<float> *A, magma_int_t lda,
c10::complex<float> *w,
c10::complex<float> *VL, magma_int_t ldvl,
c10::complex<float> *VR, magma_int_t ldvr,
c10::complex<float> *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_cgeev(jobvl, jobvr, n,
reinterpret_cast<magmaFloatComplex*>(A), lda,
reinterpret_cast<magmaFloatComplex*>(w),
reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<float>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
float* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<double>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
double* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<c10::complex<float>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb,
reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<c10::complex<double>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
namespace {
/*
MAGMA can return errors both as a return value and in the info argument.
The return value and info should always be identical.
In general, the meaning is as given in this table.
Predefined error codes are large negative numbers. Using the symbolic
constants below is preferred, but the numeric values can be found in
include/magma_types.h.
Info | Description
----------- | -----------
info = 0 (MAGMA_SUCCESS) | Successful exit
info < 0, but small | For info = -i, the i-th argument had an illegal value
info > 0 | Function-specific error such as singular matrix
MAGMA_ERR_DEVICE_ALLOC | Could not allocate GPU device memory
MAGMA_ERR_HOST_ALLOC | Could not allocate CPU host memory
MAGMA_ERR_ILLEGAL_VALUE | An argument had an illegal value (deprecated; instead it should return -i to say the i-th argument was bad)
MAGMA_ERR_INVALID_PTR | Can't free pointer
MAGMA_ERR_NOT_IMPLEMENTED | Function or option not implemented
MAGMA_ERR_NOT_SUPPORTED | Function or option not supported on the current architecture
*/
void checkMagmaInternalError(magma_int_t info, const std::string& magma_function_name) {
// if info > 0 the error is function-specific, do nothing in this case
TORCH_CHECK(info >= 0,
"MAGMA error: ",
magma_strerror(info),
", info = ", info,
", when calling ", magma_function_name);
}
} // anonymous namespace
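// Illustrative usage sketch (not part of the code below; the variable names are hypothetical):
// after a MAGMA call that reports through an `info` value, forward the routine name so a
// failure produces a readable message.
//
//   magma_int_t info = 0;
//   magmaCholesky<double>(MagmaLower, n, dA, ldda, &info);
//   checkMagmaInternalError(info, "cholesky");  // throws only for info < 0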
#endif // USE_MAGMA
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
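// For reference, ALLOCATE_ARRAY(A_array, scalar_t*, batch_size) expands roughly to
//   auto storage_A_array = pin_memory<scalar_t*>(batch_size);
//   A_array = static_cast<scalar_t**>(storage_A_array.data());
// i.e. the pinned-memory storage object stays alive for the enclosing scope while a raw
// pointer is handed to the MAGMA batched routines.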
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = std::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
// magmaSolve requires infos tensor to live on CPU
Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, infos.data_ptr<magma_int_t>());
infos_out.copy_(infos);
} else {
auto infos_data = infos_out.data_ptr<magma_int_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &infos_data[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&infos_data[mini_idx], batch_size % batch_limit, magma_queue);
}
}
#endif
}
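// The mini-batch bookkeeping in apply_solve is plain integer arithmetic; as a sketch with
// hypothetical numbers, batch_size = 150000 and batch_limit = 65535 give
//   mini_batches = 150000 / 65535 = 2       (two full chunks of 65535 solves)
//   tail         = 150000 % 65535 = 18930   (handled by the final batched call)
// so every matrix in the batch is processed exactly once.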
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
// infos might not get filled for empty inputs therefore at::zeros is used instead of at::empty
auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
// 'result' and 'input' should be in column major order (it should be checked before calling this function)
// the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
// 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
// 'input' should contain data of the original 'input' tensor (left-hand-side of the linear system)
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cpu", [&]{
apply_solve<scalar_t>(result, input, infos);
});
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'.
'infos_lu' and 'infos_getri' are int Tensors containing error codes for each matrix in the batched input:
'infos_lu' holds magmaLu errors and 'infos_getri' holds magmaGetri errors.
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
// magmaLu and magmaGetri require the infos tensor to live on CPU
infos_lu = infos_lu.to(at::kCPU);
infos_getri = infos_getri.to(at::kCPU);
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
});
batchCheckErrors(infos_lu, "inverse_cuda");
batchCheckErrors(infos_getri, "inverse_cuda");
} else {
// magmaLu and magmaGetri require the infos tensor to live on CPU
auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
});
singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
// assuming result is in column major order and contains the matrices to invert
if (result.dim() > 2) {
auto input_working_copy = cloneBatchedColumnMajor(result);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse<scalar_t>(
input_working_copy, result, infos_lu, infos_getri);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
});
}
return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas
} else {
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
}
#else
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
} else {
return _cholesky_solve_helper_cuda_magma(self, A, upper);
}
#else
return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(const Tensor& self, bool upper, const Tensor& info) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.cholesky on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// magmaCholesky requires info to be on CPU
magma_int_t info_cpu = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info_cpu);
info.fill_(info_cpu);
} else {
TORCH_INTERNAL_ASSERT(info.is_cuda());
auto info_data = info.data_ptr<magma_int_t>();
// magmaCholeskyBatched supports only upper=false
uplo = MagmaLower;
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run without
// violating the maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_data[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, nbatches, magma_queue);
}
}
#endif
}
void cholesky_helper_magma(const Tensor& input, bool upper, const Tensor& info) {
Tensor result = input;
if (input.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing IMA
// (see https://github.com/pytorch/pytorch/issues/42666). This code is based
// on the cloneBatchedColumnMajor function; however, it pads the input with
// one extra element utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way if MAGMA
// reads off bounds it will still be valid user memory.
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).transpose_(-2, -1);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(result.transpose(-2, -1).is_contiguous());
// batched MAGMA doesn't support upper=true
// we transpose and conjugate the input as a workaround
result.copy_(upper ? input.conj().transpose(-2, -1) : input);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
input.scalar_type(), "cholesky_cuda", [&] {
apply_cholesky<scalar_t>(result, upper, info);
});
if (input.dim() > 2) {
// if upper=true we need to transpose and conjugate the result tensor
// because the cholesky decomposition is stored in the lower triangular part
if (upper) {
input.copy_(result.conj().transpose(-2, -1));
} else {
input.copy_(result);
}
}
}
// Todo: cusolverDnXpotrfBatched has some numerical issue and is not used
// here. Batched cholesky is dispatched to magma.
// We will switch to cusolverDnXpotrfBatched after the issue is fixed.
// See https://github.com/pytorch/pytorch/issues/53879.
static void cholesky_kernel(const Tensor& input, const Tensor& info, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(input) == 1 || !use_magma_) {
cholesky_helper_cusolver(input, upper, info);
} else {
cholesky_helper_magma(input, upper, info);
}
#else
cholesky_helper_magma(input, upper, info);
#endif // USE_CUSOLVER
}
REGISTER_DISPATCH(cholesky_stub, &cholesky_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of an n-by-n symmetric (Hermitian) positive-definite matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls lapack routine on CPU
// using magmaCholeskySolveBatched is a lot faster
// note that magmaCholeskySolve is also slow
// 'input' is modified in-place, so we clone it and replace 'input' with an identity matrix
// to serve as the right-hand side for apply_cholesky_solve
auto input_working_copy = cloneBatchedColumnMajor(input);
// the 'input' tensor has to be a batch of identity matrices
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
// it returns a single 'magma_int_t'
// if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
int64_t info_tmp = 0;
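// With the identity right-hand side constructed above, the call below solves A * X = I using
// the Cholesky factor held in 'input_working_copy'; the solution X that overwrites 'input' is
// A^{-1}, since each column x_j of X satisfies A * x_j = e_j.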
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// the content of result is overwritten by 'apply_cholesky_inverse'
#ifdef USE_CUSOLVER
if (batchCount(result) == 1 || !use_magma_) {
return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
} else {
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
}
#else
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the LU decomposition of an m-by-n matrix or batch of matrices in 'input' tensor.
This is an in-place routine, content of 'input', 'pivots', and 'infos' is overwritten.
This is a "looped" variant for calling single input MAGMA function on batched input.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_gpu.
*/
template <typename scalar_t>
static void apply_lu_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
// magmaLu and magmaLuNoPiv require infos and pivots tensor to be on CPU
// the data is later copied back to the appropriate output tensor
Tensor infos_cpu = at::empty_like(infos, infos.options().device(kCPU).pinned_memory(true));
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos_cpu.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
auto pivots_stride = pivots.size(-1);
auto batch_size = batchCount(input);
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
if (compute_pivots) {
Tensor pivots_cpu = at::empty_like(pivots, pivots.options().device(kCPU).pinned_memory(true));
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
int* infos_working_ptr = &infos_data[i];
magmaLu<scalar_t>(m, n, input_working_ptr, leading_dimension, pivots_working_ptr, infos_working_ptr);
}
pivots.copy_(pivots_cpu, /*non_blocking=*/true);
} else {
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* infos_working_ptr = &infos_data[i];
magmaLuNoPiv<scalar_t>(m, n, input_working_ptr, leading_dimension, infos_working_ptr);
}
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
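// For example, for an m = 4, n = 3 input k = 3, so every matrix in the batch receives
// pivots [1, 2, 3], which encodes "no row interchanges were performed".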
}
infos.copy_(infos_cpu, /*non_blocking=*/true);
#endif
}
/*
Computes the LU decomposition of an m-by-n matrix or batch of matrices in 'input' tensor.
This is an in-place routine, content of 'input', 'pivots', and 'infos' is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_batched.
*/
template <typename scalar_t>
static void apply_lu_batched_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
magma_int_t batch_size = magma_int_cast(batchCount(input), "batchCount");
// magmaLuBatched doesn't work with zero batch dimensions
// it gives CUDA error: invalid configuration argument
if (batch_size == 0) {
infos.fill_(0);
return;
}
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
scalar_t** input_array;
ALLOCATE_ARRAY(input_array, scalar_t*, batch_size);
// Set up array of pointers to matrices
for (int64_t i = 0; i < batch_size; i++) {
input_array[i] = &input_data[i * input_matrix_stride];
}
MAGMAQueue magma_queue(input.get_device());
if (compute_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
}
magmaLuBatched<scalar_t>(m, n, input_array, leading_dimension, pivots_array, infos_data, batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(m, n, input_array, leading_dimension, infos_data, batch_size, magma_queue);
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
#endif
}
static void lu_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
// TODO: compare performance and use the best performing option based on input's sizes
if (input.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_looped_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_batched_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
}
}
REGISTER_DISPATCH(lu_stub, &lu_magma);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
// magmaTriangularSolve is calling cuBLAS and it prints
// ** On entry to DTRSM parameter number 9 had an illegal value
// so let's use proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
// For batches smaller than 8 and matrix sizes larger than 64x64 the cuBLAS for-loop is faster than the batched version
if (batchCount(A) <= 8 && A.size(-1) >= 64) {
triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
#ifndef USE_MAGMA
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#else
// cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster
if (A.size(-1) <= 512) {
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
}
#endif // USE_MAGMA
}
}
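// Summary of the dispatch above (restating the branches for readability):
//   batchCount(A) <= 8 && A.size(-1) >= 64   -> looped cuBLAS (triangular_solve_cublas)
//   else, A.size(-1) <= 512 or no MAGMA      -> batched cuBLAS
//   else                                     -> batched MAGMA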
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau) {
// TODO: It is possible to implement efficient batched orgqr for small tau (tau.size(-1) <= 32)
// using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA.
// See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA
// and Windows failure.
// For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
#if defined(USE_CUSOLVER)
return orgqr_helper_cusolver(result, tau); // cusolver
#else
TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
void ormqr_kernel(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
#if defined(USE_CUSOLVER)
ormqr_cusolver(input, tau, other, left, transpose);
#else
TORCH_CHECK(false,
"Calling torch.ormqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(ormqr_stub, &ormqr_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.geqrf on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto input_data = input.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto lda = std::max<int>(1, m);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau_cpu = at::empty(tau.sizes(), tau.options().device(at::kCPU).pinned_memory(true));
scalar_t* tau_data = tau_cpu.data_ptr<scalar_t>();
scalar_t* work_data = nullptr; // workspace is not needed for geqrf2_gpu
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
// now compute the actual QR and tau
// MAGMA's geqrf2_gpu function is used; this version has LAPACK-compliant arguments.
magmaGeqrf<scalar_t>(m, n, input_working_ptr, lda, tau_working_ptr, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
tau.copy_(tau_cpu, /*non_blocking=*/true);
#endif
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_magma(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_magma", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
// This is a backend library dispatching helper function for calling looped batch implementation
void geqrf_looped(const Tensor& input, const Tensor& tau) {
#if defined(USE_CUSOLVER)
return geqrf_cusolver(input, tau);
#else
return geqrf_magma(input, tau);
#endif
}
// This is a backend library dispatching helper function for calling specialized batched implementation
void geqrf_batched(const Tensor& input, const Tensor& tau) {
#ifdef CUDART_VERSION
// if cuBLAS is available
return geqrf_batched_cublas(input, tau);
#else
// TODO: implement MAGMA-based path using magma_zgeqrf_expert_batched
return geqrf_looped(input, tau);
#endif
}
void geqrf_kernel(const Tensor& input, const Tensor& tau) {
// if the number of rows is smaller than 32, the batched path is always faster for batch size > 1;
// for a larger number of rows the choice also depends on the number of batches (see the condition below)
if (input.size(-2) <= 256 && batchCount(input) >= std::max<int64_t>(2, input.size(-2) / 16)) {
return geqrf_batched(input, tau);
} else {
return geqrf_looped(input, tau);
}
}
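// For example, with the heuristic above a batch of 100 matrices of size 64x64 takes the
// batched path (64 <= 256 and 100 >= max(2, 64 / 16) = 4), while a single 1024x1024 matrix
// falls through to the looped path.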
REGISTER_DISPATCH(geqrf_stub, &geqrf_kernel);
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
// We have to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
checkMagmaInternalError(info, "geqrf");
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
checkMagmaInternalError(info, "orgqr");
}
#endif
}
std::tuple<Tensor, Tensor> linalg_qr_helper_magma(const Tensor& self, std::string mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
auto r_shape = self.sizes().vec();
r_shape.end()[-2] = n_columns_q;
r_shape.end()[-1] = n;
r_working_copy = at::empty(r_shape, self.options());
if (compute_q) {
auto q_shape = q_sizes;
q_shape.end()[-1] = n_columns_q;
q_working_copy = at::zeros(q_shape, self.options());
q_working_copy.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q);
});
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
std::tuple<Tensor, Tensor> _linalg_qr_helper_cuda(const Tensor& input, std::string mode) {
#if defined(USE_CUSOLVER)
// _linalg_qr_helper_default is a generic function that is implemented using
// geqrf_stub and orgqr_stub. It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return _linalg_qr_helper_default(input, mode);
#else
return linalg_qr_helper_magma(input, mode);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_magma_eigh(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
    // The current behaviour for linear algebra functions is to raise an error if something goes wrong
    // or the input doesn't satisfy some requirement,
    // therefore we return early since further computations would be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
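// The lwork == -1 / lrwork == -1 / liwork == -1 calls above follow the standard
// LAPACK/MAGMA workspace-query convention: the first call performs no computation and
// only writes the optimal workspace sizes into wkopt / rwkopt / iwkopt, which are then
// used to allocate work, rwork and iwork once for the whole batch. A minimal sketch of
// the same idiom (solver is a placeholder, not a real MAGMA function):
//
//   magma_int_t lwork = -1;           // query only
//   scalar_t wkopt;
//   solver(..., &wkopt, lwork, ...);  // writes the optimal size into wkopt
//   lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
//   ALLOCATE_ARRAY(work, scalar_t, lwork);
//   solver(..., work, lwork, ...);    // actual computation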
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
ScalarType real_dtype = toValueType(self.scalar_type());
// magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
  // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
  // the required dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(eigvals_shape, self.options().dtype(real_dtype))
: at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is a type dispatch function for 'apply_magma_eigh'
// For small inputs result is computed on CPU
void linalg_eigh_magma(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
// MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
// See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
  // in addition, lda is ignored, breaking 0x0 inputs
if (eigenvectors.size(-1) > 128) {
// MAGMA requires eigenvalues and infos tensors to reside on CPU
Tensor eigenvalues_cpu = eigenvalues.to(kCPU);
infos = infos.to(kCPU);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
eigenvectors.scalar_type(), "linalg_eigh_cpu", [&] {
apply_magma_eigh<scalar_t>(
eigenvalues_cpu, eigenvectors, infos, upper, compute_eigenvectors);
});
// Transfer computed by MAGMA results from CPU to GPU
eigenvalues.copy_(eigenvalues_cpu);
} else { // eigenvectors.size(-1) <= 128
// transfer to CPU, compute the result and copy back to GPU
    // this is faster than going through MAGMA, which does the same
Tensor eigenvalues_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
if (compute_eigenvectors) {
Tensor eigenvectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
at::linalg_eigh_out(eigenvalues_cpu, eigenvectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
eigenvectors.copy_(eigenvectors_cpu);
} else {
at::linalg_eigvalsh_out(eigenvalues_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
}
eigenvalues.copy_(eigenvalues_cpu);
}
}
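// Illustrative example of the dispatch above (hypothetical sizes): a batch of 300 x 300
// Hermitian matrices satisfies eigenvectors.size(-1) > 128 and is handled by
// apply_magma_eigh directly on the GPU, while a 64 x 64 problem is round-tripped through
// the CPU path (at::linalg_eigh_out / at::linalg_eigvalsh_out), since MAGMA would fall
// back to LAPACK for such small sizes anyway.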
void linalg_eigh_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}
REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and returns CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
    // call it a 2nd time to do the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
int64_t n = self.size(-1);
// copy self to pinned CPU memory
auto self_working_copy = at::empty_strided(
{n, n}, // square matrix
{1, n}, // column-ordered, as magmaEig expects
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
// tensors holding the results. We use empty_strided to make them column-ordered
auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor out_eigvals;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
out_eigvals = at::empty({n}, options);
} else {
out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
}
auto out_eigvecs = eigenvectors
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
});
singleCheckErrors(info, "eig_cuda");
return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
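// Note on the empty_strided calls above (worked layout example, n == 3 is hypothetical):
// sizes {n, n} with strides {1, n} describe a column-major (Fortran-order) buffer, which
// is what magmaEig expects; element (i, j) lives at offset i * 1 + j * 3, so each column
// of 3 elements is contiguous in memory. The real-valued eigenvalue tensor {n, 2} with
// strides {1, n} is laid out the same way: all real parts first, then all imaginary
// parts, matching the wr / wi split done inside magmaEig<double> / magmaEig<float>.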
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the eigenvalues and eigenvectors of n-by-n matrix 'input'.
This is an in-place routine, content of 'input', 'values', 'vectors' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
For more information see MAGMA's documentation for GEEV routine.
*/
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
if (compute_eigenvectors) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
}
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_vec_t jobvl = MagmaNoVec; // only right eigenvectors are computed
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(input);
auto input_matrix_stride = matrixStride(input);
auto values_stride = values.size(-1);
auto input_data = input.data_ptr<scalar_t>();
auto values_data = values.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
scalar_t* lvectors_data = nullptr; // only right eigenvectors are computed
int64_t ldvr = compute_eigenvectors ? lda : 1;
int64_t ldvl = 1;
Tensor rwork;
value_t* rwork_data = nullptr;
if (input.is_complex()) {
ScalarType real_dtype = toValueType(input.scalar_type());
rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
rwork_data = rwork.data_ptr<value_t>();
}
// call magmaEig once to get the optimal size of work_data
scalar_t work_query;
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);
magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
Tensor work = at::empty({lwork}, input.dtype());
auto work_data = work.data_ptr<scalar_t>();
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* values_working_ptr = &values_data[i * values_stride];
scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
int* info_working_ptr = &infos_data[i];
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
}
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
// This function calculates the non-symmetric eigendecomposition in-place
// tensors should be in batched column major memory format
// the content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'
// apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy
  // MAGMA doesn't have a GPU interface for the eigendecomposition, which forces us to transfer 'input' to the CPU
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
input_working_copy.transpose_(-2, -1); // make input_working_copy to have Fortran contiguous memory layout
input_working_copy.copy_(input);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
});
}
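// Layout illustration for the copy above (hypothetical 2 x 2 example): for a matrix
// [[a, b], [c, d]], the C-contiguous buffer transposed in place has strides (1, 2), so
// after copy_ the memory holds a, c, b, d -- column-major order, which is the layout
// MAGMA's geev driver expects for each matrix in the batch.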
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto lda = std::max<magma_int_t>(1, m);
auto ldvt = std::max<magma_int_t>(1, n);
  auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
  scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace, therefore use 1 as the default value
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
  int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
  // For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
// Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
VT_working_copy = VT_working_copy.conj();
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
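// Summary of the jobchar mapping used above, with a hypothetical 6 x 4 input (so
// k == std::min(m, n) == 4) to make the shapes concrete:
//   compute_uv == true,  some == true   -> 'S' (MagmaSomeVec): U is 6 x 4, S has 4 entries, V is 4 x 4
//   compute_uv == true,  some == false  -> 'A' (MagmaAllVec):  U is 6 x 6, S has 4 entries, V is 4 x 4
//   compute_uv == false                 -> 'N' (MagmaNoVec):   singular values only; the returned U and V
//                                          are allocated but zeroed out before returning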
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a "looped" variant for calling single input MAGMA function on batched input.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_gpu.
*/
template <typename scalar_t>
static void apply_lu_solve_looped_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
// MAGMA requires pivots to be a CPU tensor
Tensor pivots_cpu = pivots.cpu();
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots_cpu.size(-1);
auto batch_size = batchCount(b);
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
int info = 0;
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* b_working_ptr = &b_data[i * b_stride];
scalar_t* lu_working_ptr = &lu_data[i * lu_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
magmaLuSolve<scalar_t>(n, nrhs, lu_working_ptr, leading_dimension, pivots_working_ptr, b_working_ptr, leading_dimension, &info);
// info from magmaLuSolve only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
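// Illustrative end-to-end usage that reaches the LU-solve kernels in this section
// (a sketch, not taken from this file; tensor names and shapes are hypothetical, and
// the exact factorization helper may differ between PyTorch versions):
//
//   Tensor A = at::randn({4, 4}, at::kCUDA).to(at::kDouble);
//   Tensor B = at::randn({4, 2}, A.options());
//   Tensor LU, pivots, info;
//   std::tie(LU, pivots, info) = at::_lu_with_info(A, /*pivot=*/true, /*check_errors=*/false);
//   Tensor X = at::lu_solve(B, LU, pivots);  // dispatches to lu_solve_magma below on CUDA
//   TORCH_CHECK(at::allclose(at::matmul(A, X), B, /*rtol=*/1e-8, /*atol=*/1e-8));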
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_batched.
*/
template <typename scalar_t>
static void apply_lu_solve_batched_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Compute the result in batches of 65535
// that is the maximum allowed number for batch_size in MAGMA
constexpr int64_t batch_limit = 65535;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
    int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
int info;
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, leading_dimension,
pivots_array_cur, b_array_cur, leading_dimension,
info, nbatches, magma_queue);
// info from magmaLuSolveBatched only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
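// Worked example for the 65535 batch limit above (hypothetical batch size): with
// batch_size == 150000 the loop issues three magmaLuSolveBatched calls with
// nbatches == 65535, 65535 and 18930 (150000 - 2 * 65535 == 18930), each starting from
// the pointer arrays offset by mini_idx.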
static void lu_solve_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
// TODO: compare performance and use the best performing option based on lu's sizes
if (b.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_looped_magma<scalar_t>(b, lu, pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_batched_magma<scalar_t>(b, lu, pivots);
});
}
}
REGISTER_DISPATCH(lu_solve_stub, &lu_solve_magma);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto trans = MagmaNoTrans;
auto m = magma_int_cast(a.size(-2), "m");
auto n = magma_int_cast(a.size(-1), "n");
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA");
auto nrhs = magma_int_cast(b.size(-1), "nrhs");
auto ldda = std::max<magma_int_t>(1, m);
  auto lddb = std::max<magma_int_t>(1, std::max(m, n));
auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
auto* hwork_ptr = hwork.data_ptr<scalar_t>();
// MAGMA requires infos tensor to live on CPU
infos = infos.to(at::kCPU);
auto infos_data = infos.data_ptr<magma_int_t>();
batch_iterator_with_broadcasting<scalar_t>(a, b,
[&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
int64_t a_linear_batch_idx) {
magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
magmaGels<scalar_t>(trans, m, n, nrhs,
a_working_ptr, ldda, b_working_ptr, lddb,
hwork_ptr, lwork, infos_working_ptr);
}
);
#endif
}
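// Worked example for the lwork formula above (hypothetical sizes; nb is whatever
// magmaGeqrfOptimalBlocksize returns for the build): with m == 100, n == 50, nrhs == 4
// and nb == 32, lwork == (100 - 50 + 32) * (4 + 32) + 4 * 32 == 82 * 36 + 128 == 3080
// scalar_t elements for hwork.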
void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& rank, Tensor& singular_values, Tensor& infos, double rcond, std::string driver_name) {
(void)rank; // unused
(void)singular_values; // unused
(void)rcond; // unused
(void)driver_name; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "linalg_lstsq_cuda", [&] {
apply_gels<scalar_t>(a, b, infos);
});
}
REGISTER_DISPATCH(lstsq_stub, &lstsq_kernel);
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
071b3444374679d124f75ef6069c008a4f449d1e.cu
|
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
double *A, magma_int_t lda,
double *w,
double *VL, magma_int_t ldvl,
double *VR, magma_int_t ldvr,
double *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
  // magma [sd]geev wants separate output arrays: wr and wi for the real
// and imaginary parts
double *wr = w;
double *wi = w + n;
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
float *A, magma_int_t lda,
float *w,
float *VL, magma_int_t ldvl,
float *VR, magma_int_t ldvr,
float *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
float *wr = w;
float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<double> *A, magma_int_t lda,
c10::complex<double> *w,
c10::complex<double> *VL, magma_int_t ldvl,
c10::complex<double> *VR, magma_int_t ldvr,
c10::complex<double> *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_zgeev(jobvl, jobvr, n,
reinterpret_cast<magmaDoubleComplex*>(A), lda,
reinterpret_cast<magmaDoubleComplex*>(w),
reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<float> *A, magma_int_t lda,
c10::complex<float> *w,
c10::complex<float> *VL, magma_int_t ldvl,
c10::complex<float> *VR, magma_int_t ldvr,
c10::complex<float> *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_cgeev(jobvl, jobvr, n,
reinterpret_cast<magmaFloatComplex*>(A), lda,
reinterpret_cast<magmaFloatComplex*>(w),
reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<float>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
float* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<double>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
double* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<c10::complex<float>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb,
reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<c10::complex<double>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
namespace {
/*
MAGMA can return errors both as a return value and in the info argument.
The return value and info should always be identical.
In general, the meaning is as given in this table.
Predefined error codes are large negative numbers. Using the symbolic
constants below is preferred, but the numeric values can be found in
include/magma_types.h.
Info | Description
----------- | -----------
info = 0 (MAGMA_SUCCESS) | Successful exit
info < 0, but small | For info = -i, the i-th argument had an illegal value
info > 0 | Function-specific error such as singular matrix
MAGMA_ERR_DEVICE_ALLOC | Could not allocate GPU device memory
MAGMA_ERR_HOST_ALLOC | Could not allocate CPU host memory
MAGMA_ERR_ILLEGAL_VALUE | An argument had an illegal value (deprecated; instead it should return -i to say the i-th argument was bad)
MAGMA_ERR_INVALID_PTR | Can't free pointer
MAGMA_ERR_NOT_IMPLEMENTED | Function or option not implemented
MAGMA_ERR_NOT_SUPPORTED | Function or option not supported on the current architecture
*/
void checkMagmaInternalError(magma_int_t info, const std::string& magma_function_name) {
// if info > 0 the error is function-specific, do nothing in this case
TORCH_CHECK(info >= 0,
"MAGMA error: ",
magma_strerror(info),
", info = ", info,
", when calling ", magma_function_name);
}
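// For example, checkMagmaInternalError(info, "geqrf") throws for illegal-argument errors
// (info < 0) but leaves function-specific positive codes (such as a singular matrix) for the caller to handle.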
} // anonymous namespace
#endif // USE_MAGMA
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
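// Note: ALLOCATE_ARRAY allocates pinned (page-locked) host memory. For example,
// ALLOCATE_ARRAY(ipiv_data, magma_int_t, 8) expands roughly to:
//   auto storage_ipiv_data = pin_memory<magma_int_t>(8);
//   ipiv_data = static_cast<magma_int_t*>(storage_ipiv_data.data());
// The backing Storage keeps the buffer alive for the rest of the enclosing scope.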
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = std::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
// magmaSolve requires infos tensor to live on CPU
Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, infos.data_ptr<magma_int_t>());
infos_out.copy_(infos);
} else {
auto infos_data = infos_out.data_ptr<magma_int_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
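    // For example, with batch_size = 150000 and batch_limit = 65535 the loop below runs
    // twice (covering 131070 solves) and the tail call handles the remaining 18930.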
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &infos_data[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&infos_data[mini_idx], batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
// infos might not get filled for empty inputs therefore at::zeros is used instead of at::empty
auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
// 'result' and 'input' should be in column major order (it should be checked before calling this function)
// the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
// 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
  // 'input' should contain data of original 'input' tensor (left-hand-side of the linear system)
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cpu", [&]{
apply_solve<scalar_t>(result, input, infos);
});
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 as possible
  // The number of "mini"-batches is floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
  // magmaLu and magmaGetri require the infos tensor to live on CPU
infos_lu = infos_lu.to(at::kCPU);
infos_getri = infos_getri.to(at::kCPU);
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
});
batchCheckErrors(infos_lu, "inverse_cuda");
batchCheckErrors(infos_getri, "inverse_cuda");
} else {
    // magmaLu and magmaGetri require the infos tensor to live on CPU
auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
});
singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
// assuming result is in column major order and contains the matrices to invert
if (result.dim() > 2) {
auto input_working_copy = cloneBatchedColumnMajor(result);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse<scalar_t>(
input_working_copy, result, infos_lu, infos_getri);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
});
}
return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas
} else {
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
}
#else
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
} else {
return _cholesky_solve_helper_cuda_magma(self, A, upper);
}
#else
return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(const Tensor& self, bool upper, const Tensor& info) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.cholesky on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// magmaCholesky requires info to be on CPU
magma_int_t info_cpu = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info_cpu);
info.fill_(info_cpu);
} else {
TORCH_INTERNAL_ASSERT(info.is_cuda());
auto info_data = info.data_ptr<magma_int_t>();
// magmaCholeskyBatched supports only upper=false
uplo = MagmaLower;
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
    // Compute as many batches of 262140 as possible
    // 262140 is the size of the largest batch of matrices that can be run without
    // violating the maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
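    // For example, a real-valued batch of 300000 matrices is processed in two chunks:
    // 262140 matrices first, then the remaining 37860.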
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_data[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, nbatches, magma_queue);
}
}
#endif
}
void cholesky_helper_magma(const Tensor& input, bool upper, const Tensor& info) {
Tensor result = input;
if (input.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing IMA
// (see https://github.com/pytorch/pytorch/issues/42666). This code is based
// on the #cloneBatchedColumnMajor function however it pads the input with
// one extra element utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way if MAGMA
// reads off bounds it will still be valid user memory.
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).transpose_(-2, -1);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(result.transpose(-2, -1).is_contiguous());
// batched MAGMA doesn't support upper=true
// we transpose and conjugate the input as a workaround
result.copy_(upper ? input.conj().transpose(-2, -1) : input);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
input.scalar_type(), "cholesky_cuda", [&] {
apply_cholesky<scalar_t>(result, upper, info);
});
if (input.dim() > 2) {
    // if upper=true we need to transpose and conjugate the result tensor
// because the cholesky decomposition is stored in the lower triangular part
if (upper) {
input.copy_(result.conj().transpose(-2, -1));
} else {
input.copy_(result);
}
}
}
// Todo: cusolverDnXpotrfBatched has some numerical issue and is not used
// here. Batched cholesky is dispatched to magma.
// We will switch to cusolverDnXpotrfBatched after the issue is fixed.
// See https://github.com/pytorch/pytorch/issues/53879.
static void cholesky_kernel(const Tensor& input, const Tensor& info, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(input) == 1 || !use_magma_) {
cholesky_helper_cusolver(input, upper, info);
} else {
cholesky_helper_magma(input, upper, info);
}
#else
cholesky_helper_magma(input, upper, info);
#endif // USE_CUSOLVER
}
REGISTER_DISPATCH(cholesky_stub, &cholesky_kernel)
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of a symmetric (Hermitian) positive-definite matrix n-by-n matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls lapack routine on CPU
// using magmaCholeskySolveBatched is a lot faster
// note that magmaCholeskySolve is also slow
// 'input' is modified in-place we need to clone it and replace with a diagonal matrix
// for apply_cholesky_solve
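  // In effect this computes X = A^{-1} by solving A X = I with apply_cholesky_solve,
  // where 'input' holds the Cholesky factor of A on entry.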
auto input_working_copy = cloneBatchedColumnMajor(input);
// 'input' tensor has to be a batch of diagonal matrix
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
// it returns a single 'magma_int_t'
// if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// the content of result is overwritten by 'apply_cholesky_inverse'
#ifdef USE_CUSOLVER
if (batchCount(result) == 1 || !use_magma_) {
return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
} else {
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
}
#else
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
  Computes the LU decomposition of an m×n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine, content of 'input', 'pivots', and 'infos' is overwritten.
This is a "looped" variant for calling single input MAGMA function on batched input.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_gpu.
*/
template <typename scalar_t>
static void apply_lu_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
  // magmaLu and magmaLuNoPiv require the infos and pivots tensors to be on CPU
  // the data is later copied back to the appropriate output tensors
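  // Pinned host memory lets the copies back into the device-resident 'infos' and
  // 'pivots' tensors below run asynchronously (non_blocking=true).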
Tensor infos_cpu = at::empty_like(infos, infos.options().device(kCPU).pinned_memory(true));
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos_cpu.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
auto pivots_stride = pivots.size(-1);
auto batch_size = batchCount(input);
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
if (compute_pivots) {
Tensor pivots_cpu = at::empty_like(pivots, pivots.options().device(kCPU).pinned_memory(true));
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
int* infos_working_ptr = &infos_data[i];
magmaLu<scalar_t>(m, n, input_working_ptr, leading_dimension, pivots_working_ptr, infos_working_ptr);
}
pivots.copy_(pivots_cpu, /*non_blocking=*/true);
} else {
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* infos_working_ptr = &infos_data[i];
magmaLuNoPiv<scalar_t>(m, n, input_working_ptr, leading_dimension, infos_working_ptr);
}
// fill the pivots tensor with indices using 1-based (Fortran) indexing
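      // For example, for a 4x3 input k = min(4, 3) = 3 and every pivot row becomes [1, 2, 3].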
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
infos.copy_(infos_cpu, /*non_blocking=*/true);
#endif
}
/*
  Computes the LU decomposition of an m×n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine, content of 'input', 'pivots', and 'infos' is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_batched.
*/
template <typename scalar_t>
static void apply_lu_batched_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
magma_int_t batch_size = magma_int_cast(batchCount(input), "batchCount");
// magmaLuBatched doesn't work with zero batch dimensions
// it gives CUDA error: invalid configuration argument
if (batch_size == 0) {
infos.fill_(0);
return;
}
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
scalar_t** input_array;
ALLOCATE_ARRAY(input_array, scalar_t*, batch_size);
// Set up array of pointers to matrices
for (int64_t i = 0; i < batch_size; i++) {
input_array[i] = &input_data[i * input_matrix_stride];
}
MAGMAQueue magma_queue(input.get_device());
if (compute_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
}
magmaLuBatched<scalar_t>(m, n, input_array, leading_dimension, pivots_array, infos_data, batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(m, n, input_array, leading_dimension, infos_data, batch_size, magma_queue);
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
#endif
}
static void lu_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
// TODO: compare performance and use the best performing option based on input's sizes
if (input.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_looped_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_batched_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
}
}
REGISTER_DISPATCH(lu_stub, &lu_magma);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
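  // Note: conjugate_transpose takes precedence over transpose when both flags are set.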
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
  // magmaTriangularSolve calls cuBLAS, which prints
// ** On entry to DTRSM parameter number 9 had an illegal value
// so let's use proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 as possible
  // The number of "mini"-batches is floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
  // For batches of at most 8 matrices of size at least 64x64, the cuBLAS for-loop path is faster than the batched version
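  // For example, for A of shape (4, 128, 128): batchCount(A) = 4 <= 8 and A.size(-1) = 128 >= 64,
  // so the cuBLAS for-loop path is taken.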
if (batchCount(A) <= 8 && A.size(-1) >= 64) {
triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
#ifndef USE_MAGMA
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#else
// cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster
if (A.size(-1) <= 512) {
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
}
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau) {
// TODO: It is possible to implement efficient batched orgqr for small tau (tau.size(-1) <= 32)
// using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA.
// See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA
// and Windows failure.
// For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
#if defined(USE_CUSOLVER)
return orgqr_helper_cusolver(result, tau); // cusolver
#else
TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
void ormqr_kernel(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
#if defined(USE_CUSOLVER)
ormqr_cusolver(input, tau, other, left, transpose);
#else
TORCH_CHECK(false,
"Calling torch.ormqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(ormqr_stub, &ormqr_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.geqrf on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto input_data = input.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto lda = std::max<int>(1, m);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau_cpu = at::empty(tau.sizes(), tau.options().device(at::kCPU).pinned_memory(true));
scalar_t* tau_data = tau_cpu.data_ptr<scalar_t>();
scalar_t* work_data = nullptr; // workspace is not needed for geqrf2_gpu
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
// now compute the actual QR and tau
    // MAGMA's geqrf2_gpu function is used, this version has LAPACK-compliant arguments.
magmaGeqrf<scalar_t>(m, n, input_working_ptr, lda, tau_working_ptr, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
tau.copy_(tau_cpu, /*non_blocking=*/true);
#endif
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_magma(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_magma", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
// This is a backend library dispatching helper function for calling looped batch implementation
void geqrf_looped(const Tensor& input, const Tensor& tau) {
#if defined(USE_CUSOLVER)
return geqrf_cusolver(input, tau);
#else
return geqrf_magma(input, tau);
#endif
}
// This is a backend library dispatching helper function for calling specialized batched implementation
void geqrf_batched(const Tensor& input, const Tensor& tau) {
#ifdef CUDART_VERSION
// if cuBLAS is available
return geqrf_batched_cublas(input, tau);
#else
// TODO: implement MAGMA-based path using magma_zgeqrf_expert_batched
return geqrf_looped(input, tau);
#endif
}
void geqrf_kernel(const Tensor& input, const Tensor& tau) {
  // if the number of rows is smaller than 32, the batched path is always faster for batch size > 1
  // for a larger number of rows the batched path is used only when the number of batches is large enough (see the condition below)
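  // For example, an input of shape (64, 100, 100) has 100 <= 256 rows and
  // batchCount = 64 >= max(2, 100 / 16) = 6, so the batched path is taken.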
if (input.size(-2) <= 256 && batchCount(input) >= std::max<int64_t>(2, input.size(-2) / 16)) {
return geqrf_batched(input, tau);
} else {
return geqrf_looped(input, tau);
}
}
REGISTER_DISPATCH(geqrf_stub, &geqrf_kernel);
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
  // We need to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
checkMagmaInternalError(info, "geqrf");
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
checkMagmaInternalError(info, "orgqr");
}
#endif
}
std::tuple<Tensor, Tensor> linalg_qr_helper_magma(const Tensor& self, std::string mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
auto r_shape = self.sizes().vec();
r_shape.end()[-2] = n_columns_q;
r_shape.end()[-1] = n;
r_working_copy = at::empty(r_shape, self.options());
if (compute_q) {
auto q_shape = q_sizes;
q_shape.end()[-1] = n_columns_q;
q_working_copy = at::zeros(q_shape, self.options());
q_working_copy.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q);
});
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
std::tuple<Tensor, Tensor> _linalg_qr_helper_cuda(const Tensor& input, std::string mode) {
#if defined(USE_CUSOLVER)
// _linalg_qr_helper_default is a generic function that is implemented using
// geqrf_stub and orgqr_stub. It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return _linalg_qr_helper_default(input, mode);
#else
return linalg_qr_helper_magma(input, mode);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_magma_eigh(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
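  // Passing lwork = liwork = lrwork = -1 is the LAPACK/MAGMA convention for a workspace
  // query: only the optimal workspace sizes are written to wkopt, iwkopt and rwkopt.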
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
      // The current behaviour for linear algebra functions is to raise an error
      // if something goes wrong or the input doesn't satisfy some requirement,
      // therefore we return early since further computations would be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
ScalarType real_dtype = toValueType(self.scalar_type());
// magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// the appropriate dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(eigvals_shape, self.options().dtype(real_dtype))
: at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is a type dispatch function for 'apply_magma_eigh'
// For small inputs result is computed on CPU
void linalg_eigh_magma(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
// MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
// See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
// in addition, lda is ignored, breaking 0x0 inputs
if (eigenvectors.size(-1) > 128) {
// MAGMA requires eigenvalues and infos tensors to reside on CPU
Tensor eigenvalues_cpu = eigenvalues.to(kCPU);
infos = infos.to(kCPU);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
eigenvectors.scalar_type(), "linalg_eigh_cpu", [&] {
apply_magma_eigh<scalar_t>(
eigenvalues_cpu, eigenvectors, infos, upper, compute_eigenvectors);
});
// Transfer computed by MAGMA results from CPU to GPU
eigenvalues.copy_(eigenvalues_cpu);
} else { // eigenvectors.size(-1) <= 128
// transfer to CPU, compute the result and copy back to GPU
// this is faster than going through MAGMA that does the same
Tensor eigenvalues_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
if (compute_eigenvectors) {
Tensor eigenvectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
at::linalg_eigh_out(eigenvalues_cpu, eigenvectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
eigenvectors.copy_(eigenvectors_cpu);
} else {
at::linalg_eigvalsh_out(eigenvalues_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
}
eigenvalues.copy_(eigenvalues_cpu);
}
}
void linalg_eigh_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}
REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
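// A minimal usage sketch of the dispatch above (assuming a CUDA build; tensor names are
// illustrative only): the stub is reached through the public linalg.eigh API.
//
//   Tensor A = at::randn({4, 4}, at::kCUDA);
//   A = A + A.transpose(-2, -1);                 // make the input symmetric
//   Tensor w, V;
//   std::tie(w, V) = at::linalg_eigh(A, "L");    // eigenvalues ascending, columns of V are eigenvectors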
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
// call it a 2nd time to do the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
int64_t n = self.size(-1);
// copy self to pinned CPU memory
auto self_working_copy = at::empty_strided(
{n, n}, // square matrix
{1, n}, // column-ordered, as magmaEig expects
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
// tensors holding the results. We use empty_strided to make them column-ordered
auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor out_eigvals;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
out_eigvals = at::empty({n}, options);
} else {
out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
}
auto out_eigvecs = eigenvectors
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
});
singleCheckErrors(info, "eig_cuda");
return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
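// A minimal usage sketch for the kernel registered above (tensor names are illustrative):
// for a real input, the eigenvalues come back as an {n, 2} tensor holding the real and
// imaginary parts, matching the layout allocated in eig_kernel_impl.
//
//   Tensor A = at::randn({3, 3}, at::kCUDA);
//   Tensor e, v;
//   std::tie(e, v) = at::eig(A, /*eigenvectors=*/true);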
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the eigenvalues and eigenvectors of n-by-n matrix 'input'.
This is an in-place routine, content of 'input', 'values', 'vectors' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
For more information see MAGMA's documentation for GEEV routine.
*/
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
if (compute_eigenvectors) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
}
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_vec_t jobvl = MagmaNoVec; // only right eigenvectors are computed
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(input);
auto input_matrix_stride = matrixStride(input);
auto values_stride = values.size(-1);
auto input_data = input.data_ptr<scalar_t>();
auto values_data = values.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
scalar_t* lvectors_data = nullptr; // only right eigenvectors are computed
int64_t ldvr = compute_eigenvectors ? lda : 1;
int64_t ldvl = 1;
Tensor rwork;
value_t* rwork_data = nullptr;
if (input.is_complex()) {
ScalarType real_dtype = toValueType(input.scalar_type());
rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
rwork_data = rwork.data_ptr<value_t>();
}
// call magmaEig once to get the optimal size of work_data
scalar_t work_query;
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);
magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
Tensor work = at::empty({lwork}, input.dtype());
auto work_data = work.data_ptr<scalar_t>();
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* values_working_ptr = &values_data[i * values_stride];
scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
int* info_working_ptr = &infos_data[i];
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
}
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
// This function calculates the non-symmetric eigendecomposition in-place
// tensors should be in batched column major memory format
// the content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'
// apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy
// MAGMA doesn't have GPU interface for the eigendecomposition and it forces us to transfer 'input' to CPU
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
input_working_copy.transpose_(-2, -1); // make input_working_copy to have Fortran contiguous memory layout
input_working_copy.copy_(input);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
});
}
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
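// A minimal usage sketch for the kernel registered above (tensor names are illustrative):
// unlike torch.eig, torch.linalg.eig always returns complex eigenvalues and eigenvectors,
// even for real input.
//
//   Tensor A = at::randn({3, 3}, at::kCUDA);
//   Tensor w, V;
//   std::tie(w, V) = at::linalg_eig(A);          // w and V are complex tensors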
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto lda = std::max<magma_int_t>(1, m);
auto ldvt = std::max<magma_int_t>(1, n);
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace therefore use 1 as the default value
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
// Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
VT_working_copy = VT_working_copy.conj();
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
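// A minimal usage sketch for the helper above (tensor names are illustrative): torch.svd
// returns V (not V^H), so a reconstruction of the input uses V.conj().transpose(-2, -1).
//
//   Tensor A = at::randn({5, 3}, at::kCUDA);
//   Tensor U, S, V;
//   std::tie(U, S, V) = at::svd(A, /*some=*/true, /*compute_uv=*/true);
//   Tensor A_rec = at::matmul(U, at::matmul(at::diag_embed(S), V.conj().transpose(-2, -1)));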
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a "looped" variant for calling single input MAGMA function on batched input.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_gpu.
*/
template <typename scalar_t>
static void apply_lu_solve_looped_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
// MAGMA requires pivots to be a CPU tensor
Tensor pivots_cpu = pivots.cpu();
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots_cpu.size(-1);
auto batch_size = batchCount(b);
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
int info = 0;
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* b_working_ptr = &b_data[i * b_stride];
scalar_t* lu_working_ptr = &lu_data[i * lu_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
magmaLuSolve<scalar_t>(n, nrhs, lu_working_ptr, leading_dimension, pivots_working_ptr, b_working_ptr, leading_dimension, &info);
// info from magmaLuSolve only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_batched.
*/
template <typename scalar_t>
static void apply_lu_solve_batched_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Compute the result in batches of 65535
// that is the maximum allowed number for batch_size in MAGMA
constexpr int64_t batch_limit = 65535;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
int info;
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, leading_dimension,
pivots_array_cur, b_array_cur, leading_dimension,
info, nbatches, magma_queue);
// info from magmaLuSolveBatched only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
static void lu_solve_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
// TODO: compare performance and use the best performing option based on lu's sizes
if (b.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_looped_magma<scalar_t>(b, lu, pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_batched_magma<scalar_t>(b, lu, pivots);
});
}
}
REGISTER_DISPATCH(lu_solve_stub, &lu_solve_magma);
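// A minimal usage sketch for the dispatch above (tensor names are illustrative): the LU
// factorization and pivots come from at::_lu_with_info, as noted in the comments above.
//
//   Tensor A = at::randn({4, 4}, at::kCUDA);
//   Tensor B = at::randn({4, 2}, at::kCUDA);
//   Tensor LU, pivots, info;
//   std::tie(LU, pivots, info) = at::_lu_with_info(A, /*pivot=*/true, /*check_errors=*/false);
//   Tensor X = at::lu_solve(B, LU, pivots);      // solves A X = B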
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto trans = MagmaNoTrans;
auto m = magma_int_cast(a.size(-2), "m");
auto n = magma_int_cast(a.size(-1), "n");
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA");
auto nrhs = magma_int_cast(b.size(-1), "nrhs");
auto ldda = std::max<magma_int_t>(1, m);
auto lddb = std::max<magma_int_t>(1, std::max(m, n));
auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
auto* hwork_ptr = hwork.data_ptr<scalar_t>();
// MAGMA requires infos tensor to live on CPU
infos = infos.to(at::kCPU);
auto infos_data = infos.data_ptr<magma_int_t>();
batch_iterator_with_broadcasting<scalar_t>(a, b,
[&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
int64_t a_linear_batch_idx) {
magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
magmaGels<scalar_t>(trans, m, n, nrhs,
a_working_ptr, ldda, b_working_ptr, lddb,
hwork_ptr, lwork, infos_working_ptr);
}
);
#endif
}
void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& rank, Tensor& singular_values, Tensor& infos, double rcond, std::string driver_name) {
(void)rank; // unused
(void)singular_values; // unused
(void)rcond; // unused
(void)driver_name; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "linalg_lstsq_cuda", [&] {
apply_gels<scalar_t>(a, b, infos);
});
}
REGISTER_DISPATCH(lstsq_stub, &lstsq_kernel);
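// A minimal usage sketch for the dispatch above (names are illustrative; the optional
// rcond/driver arguments are an assumption and may differ between ATen versions). Note the
// TORCH_CHECK in apply_gels: only overdetermined systems (m >= n) are supported on CUDA.
//
//   Tensor A = at::randn({5, 3}, at::kCUDA);
//   Tensor B = at::randn({5, 2}, at::kCUDA);
//   auto result = at::linalg_lstsq(A, B, /*rcond=*/c10::nullopt, /*driver=*/c10::nullopt);
//   Tensor X = std::get<0>(result);              // least-squares solution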
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
d6fb1e398bdacfc654beaa7b0beeca92093d6969.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void helloFromGPU()
{
printf("Hello, World from GPU!\n");
}
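// A minimal, hypothetical launch sketch for the kernel above (not part of the original file):
//
//   int main()
//   {
//       hipLaunchKernelGGL(helloFromGPU, dim3(1), dim3(8), 0, 0);
//       hipDeviceSynchronize();   // flush printf output before the process exits
//       return 0;
//   }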
|
d6fb1e398bdacfc654beaa7b0beeca92093d6969.cu
|
#include "includes.h"
__global__ void helloFromGPU()
{
printf("Hello, World from GPU!\n");
}
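// A minimal, hypothetical launch sketch for the kernel above (not part of the original file):
//
//   int main()
//   {
//       helloFromGPU<<<1, 8>>>();
//       cudaDeviceSynchronize();   // flush printf output before the process exits
//       return 0;
//   }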
|
ad9a95655ad7afa78641f6a2064a3127ba4f1eb5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "Indice2D.h"
//#include "cudaTools.h"
//#include "Device.h"
//#include "IndiceTools_GPU.h"
//#include "RayTracingMath.h"
//using namespace gpu;
//
//// Note: the choice of the file name is important!
//// VagueDevice.cu and not Vague.cu
//// In the latter case there is a linkage problem, because the name of the .cu is the same as the name of a .cpp (host) file
//// So Device (or anything else) was appended so that the names are different!
//
///*----------------------------------------------------------------------*\
// |* Declaration *|
// \*---------------------------------------------------------------------*/
//
///*--------------------------------------*\
// |* Imported *|
// \*-------------------------------------*/
//
///*--------------------------------------*\
// |* Public *|
// \*-------------------------------------*/
//
//__global__ void rayTracing(uchar4* ptrDevPixels, uint w, uint h, float t);
//
///*--------------------------------------*\
// |* Private *|
// \*-------------------------------------*/
//
///*----------------------------------------------------------------------*\
// |* Implementation *|
// \*---------------------------------------------------------------------*/
//
///*--------------------------------------*\
// |* Public *|
// \*-------------------------------------*/
//
//__global__ void rayTracing(uchar4* ptrDevPixels, uint w, uint h, float t)
//{
// RayTracingMath rayTracingMath = RayTracingMath();
//
// const int TID = Indice2D::tid();
// const int NB_THREAD = Indice2D::nbThread();
// const int WH = w * h;
//
// uchar4 color;
// int i, j;
//
// int s = TID;
// while (s < WH)
// {
// IndiceTools::toIJ(s, w, &i, &j);
// rayTracingMath.colorXY(&color, i, j, t);
// ptrDevPixels[s] = color;
// s += NB_THREAD;
// }
//}
//
///*--------------------------------------*\
// |* Private *|
// \*-------------------------------------*/
//
///*----------------------------------------------------------------------*\
// |* End *|
// \*---------------------------------------------------------------------*/
//
|
ad9a95655ad7afa78641f6a2064a3127ba4f1eb5.cu
|
//#include "Indice2D.h"
//#include "cudaTools.h"
//#include "Device.h"
//#include "IndiceTools_GPU.h"
//#include "RayTracingMath.h"
//using namespace gpu;
//
//// Note: the choice of the file name is important!
//// VagueDevice.cu and not Vague.cu
//// In the latter case there is a linkage problem, because the name of the .cu is the same as the name of a .cpp (host) file
//// So Device (or anything else) was appended so that the names are different!
//
///*----------------------------------------------------------------------*\
// |* Declaration *|
// \*---------------------------------------------------------------------*/
//
///*--------------------------------------*\
// |* Imported *|
// \*-------------------------------------*/
//
///*--------------------------------------*\
// |* Public *|
// \*-------------------------------------*/
//
//__global__ void rayTracing(uchar4* ptrDevPixels, uint w, uint h, float t);
//
///*--------------------------------------*\
// |* Private *|
// \*-------------------------------------*/
//
///*----------------------------------------------------------------------*\
// |* Implementation *|
// \*---------------------------------------------------------------------*/
//
///*--------------------------------------*\
// |* Public *|
// \*-------------------------------------*/
//
//__global__ void rayTracing(uchar4* ptrDevPixels, uint w, uint h, float t)
//{
// RayTracingMath rayTracingMath = RayTracingMath();
//
// const int TID = Indice2D::tid();
// const int NB_THREAD = Indice2D::nbThread();
// const int WH = w * h;
//
// uchar4 color;
// int i, j;
//
// int s = TID;
// while (s < WH)
// {
// IndiceTools::toIJ(s, w, &i, &j);
// rayTracingMath.colorXY(&color, i, j, t);
// ptrDevPixels[s] = color;
// s += NB_THREAD;
// }
//}
//
///*--------------------------------------*\
// |* Private *|
// \*-------------------------------------*/
//
///*----------------------------------------------------------------------*\
// |* End *|
// \*---------------------------------------------------------------------*/
//
|
19719880726469b4557b8de6ac60d81732eaf09c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cudaHelper.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/memory.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/core/math/math.h"
using namespace Saiga;
template<typename T>
__global__ static void copy(Saiga::ArrayView<T> src,
Saiga::ArrayView<T> dst)
{
Saiga::CUDA::ThreadInfo<> ti;
if (ti.thread_id >= src.size()) return;
dst[ti.thread_id] = src[ti.thread_id];
}
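// A minimal, hypothetical launch sketch for the kernel above with a hand-computed grid
// (this mirrors what the Saiga THREAD_BLOCK helper used in memcpyTest below is assumed to
// compute; it is not taken from the Saiga headers, and size/srcv/dstv refer to the values
// built in memcpyTest):
//
//   const unsigned int threads = 128;
//   const unsigned int blocks  = (size + threads - 1) / threads;   // ceil(size / threads)
//   copy<float><<<blocks, threads>>>(srcv, dstv);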
void memcpyTest()
{
size_t N = 64 * 1000 * 1000;
size_t readWrites = N * 2 * sizeof(int);
thrust::device_vector<int> src(N);
thrust::device_vector<int> dest(N);
Saiga::CUDA::PerformanceTestHelper pth("Memcpy", readWrites);
// Test multiple times (its iterations) and use the median time
int its = 500;
{
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
hipMemcpy(thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(src.data()), N * sizeof(int),
hipMemcpyDeviceToDevice);
});
pth.addMeassurement("hipMemcpy", st.median);
}
const unsigned int BLOCK_SIZE = 128;
{
using T = float;
auto size =src.size() * sizeof(int) / sizeof(T);
auto srcv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(src.data()),size );
auto dstv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(dest.data()),size);
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
copy<T><<<THREAD_BLOCK(size, BLOCK_SIZE)>>>(srcv,dstv);
});
pth.addMeassurement("copy 4", st.median);
}
{
using T = float2;
auto size =src.size() * sizeof(int) / sizeof(T);
auto srcv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(src.data()),size );
auto dstv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(dest.data()),size);
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
copy<T><<<THREAD_BLOCK(size, BLOCK_SIZE)>>>(srcv,dstv);
});
pth.addMeassurement("copy 8", st.median);
}
{
using T = float4;
auto size =src.size() * sizeof(int) / sizeof(T);
auto srcv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(src.data()),size );
auto dstv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(dest.data()),size);
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
copy<T><<<THREAD_BLOCK(size, BLOCK_SIZE)>>>(srcv,dstv);
});
pth.addMeassurement("copy 16", st.median);
}
CUDA_SYNC_CHECK_ERROR();
}
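// A quick back-of-the-envelope for the numbers above (illustrative): readWrites is
// 64e6 * 2 * sizeof(int) = 512 MB per pass, so a median time of 1 ms corresponds to an
// effective bandwidth of roughly 512 GB/s.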
int main(int argc, char* argv[])
{
memcpyTest();
return 0;
}
|
19719880726469b4557b8de6ac60d81732eaf09c.cu
|
/**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cudaHelper.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/memory.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/core/math/math.h"
using namespace Saiga;
template<typename T>
__global__ static void copy(Saiga::ArrayView<T> src,
Saiga::ArrayView<T> dst)
{
Saiga::CUDA::ThreadInfo<> ti;
if (ti.thread_id >= src.size()) return;
dst[ti.thread_id] = src[ti.thread_id];
}
void memcpyTest()
{
size_t N = 64 * 1000 * 1000;
size_t readWrites = N * 2 * sizeof(int);
thrust::device_vector<int> src(N);
thrust::device_vector<int> dest(N);
Saiga::CUDA::PerformanceTestHelper pth("Memcpy", readWrites);
// Test multiple times (its iterations) and use the median time
int its = 500;
{
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
cudaMemcpy(thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(src.data()), N * sizeof(int),
cudaMemcpyDeviceToDevice);
});
pth.addMeassurement("cudaMemcpy", st.median);
}
const unsigned int BLOCK_SIZE = 128;
{
using T = float;
auto size =src.size() * sizeof(int) / sizeof(T);
auto srcv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(src.data()),size );
auto dstv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(dest.data()),size);
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
copy<T><<<THREAD_BLOCK(size, BLOCK_SIZE)>>>(srcv,dstv);
});
pth.addMeassurement("copy 4", st.median);
}
{
using T = float2;
auto size =src.size() * sizeof(int) / sizeof(T);
auto srcv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(src.data()),size );
auto dstv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(dest.data()),size);
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
copy<T><<<THREAD_BLOCK(size, BLOCK_SIZE)>>>(srcv,dstv);
});
pth.addMeassurement("copy 8", st.median);
}
{
using T = float4;
auto size =src.size() * sizeof(int) / sizeof(T);
auto srcv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(src.data()),size );
auto dstv = Saiga::ArrayView<T>( (T*)thrust::raw_pointer_cast(dest.data()),size);
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
copy<T><<<THREAD_BLOCK(size, BLOCK_SIZE)>>>(srcv,dstv);
});
pth.addMeassurement("copy 16", st.median);
}
CUDA_SYNC_CHECK_ERROR();
}
int main(int argc, char* argv[])
{
memcpyTest();
return 0;
}
|
42ec1e8090f62d6a9fc7e4df990e0ec715cc2e8e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_accelerate_kernel;
int xdim0_accelerate_kernel_h = -1;
__constant__ int ydim0_accelerate_kernel;
int ydim0_accelerate_kernel_h = -1;
__constant__ int xdim1_accelerate_kernel;
int xdim1_accelerate_kernel_h = -1;
__constant__ int ydim1_accelerate_kernel;
int ydim1_accelerate_kernel_h = -1;
__constant__ int xdim2_accelerate_kernel;
int xdim2_accelerate_kernel_h = -1;
__constant__ int ydim2_accelerate_kernel;
int ydim2_accelerate_kernel_h = -1;
__constant__ int xdim3_accelerate_kernel;
int xdim3_accelerate_kernel_h = -1;
__constant__ int ydim3_accelerate_kernel;
int ydim3_accelerate_kernel_h = -1;
__constant__ int xdim4_accelerate_kernel;
int xdim4_accelerate_kernel_h = -1;
__constant__ int ydim4_accelerate_kernel;
int ydim4_accelerate_kernel_h = -1;
__constant__ int xdim5_accelerate_kernel;
int xdim5_accelerate_kernel_h = -1;
__constant__ int ydim5_accelerate_kernel;
int ydim5_accelerate_kernel_h = -1;
__constant__ int xdim6_accelerate_kernel;
int xdim6_accelerate_kernel_h = -1;
__constant__ int ydim6_accelerate_kernel;
int ydim6_accelerate_kernel_h = -1;
__constant__ int xdim7_accelerate_kernel;
int xdim7_accelerate_kernel_h = -1;
__constant__ int ydim7_accelerate_kernel;
int ydim7_accelerate_kernel_h = -1;
__constant__ int xdim8_accelerate_kernel;
int xdim8_accelerate_kernel_h = -1;
__constant__ int ydim8_accelerate_kernel;
int ydim8_accelerate_kernel_h = -1;
__constant__ int xdim9_accelerate_kernel;
int xdim9_accelerate_kernel_h = -1;
__constant__ int ydim9_accelerate_kernel;
int ydim9_accelerate_kernel_h = -1;
__constant__ int xdim10_accelerate_kernel;
int xdim10_accelerate_kernel_h = -1;
__constant__ int ydim10_accelerate_kernel;
int ydim10_accelerate_kernel_h = -1;
__constant__ int xdim11_accelerate_kernel;
int xdim11_accelerate_kernel_h = -1;
__constant__ int ydim11_accelerate_kernel;
int ydim11_accelerate_kernel_h = -1;
__constant__ int xdim12_accelerate_kernel;
int xdim12_accelerate_kernel_h = -1;
__constant__ int ydim12_accelerate_kernel;
int ydim12_accelerate_kernel_h = -1;
__constant__ int xdim13_accelerate_kernel;
int xdim13_accelerate_kernel_h = -1;
__constant__ int ydim13_accelerate_kernel;
int ydim13_accelerate_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#undef OPS_ACC11
#undef OPS_ACC12
#undef OPS_ACC13
#define OPS_ACC0(x, y, z) \
(x + xdim0_accelerate_kernel * (y) + \
xdim0_accelerate_kernel * ydim0_accelerate_kernel * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_accelerate_kernel * (y) + \
xdim1_accelerate_kernel * ydim1_accelerate_kernel * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_accelerate_kernel * (y) + \
xdim2_accelerate_kernel * ydim2_accelerate_kernel * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_accelerate_kernel * (y) + \
xdim3_accelerate_kernel * ydim3_accelerate_kernel * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_accelerate_kernel * (y) + \
xdim4_accelerate_kernel * ydim4_accelerate_kernel * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_accelerate_kernel * (y) + \
xdim5_accelerate_kernel * ydim5_accelerate_kernel * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_accelerate_kernel * (y) + \
xdim6_accelerate_kernel * ydim6_accelerate_kernel * (z))
#define OPS_ACC7(x, y, z) \
(x + xdim7_accelerate_kernel * (y) + \
xdim7_accelerate_kernel * ydim7_accelerate_kernel * (z))
#define OPS_ACC8(x, y, z) \
(x + xdim8_accelerate_kernel * (y) + \
xdim8_accelerate_kernel * ydim8_accelerate_kernel * (z))
#define OPS_ACC9(x, y, z) \
(x + xdim9_accelerate_kernel * (y) + \
xdim9_accelerate_kernel * ydim9_accelerate_kernel * (z))
#define OPS_ACC10(x, y, z) \
(x + xdim10_accelerate_kernel * (y) + \
xdim10_accelerate_kernel * ydim10_accelerate_kernel * (z))
#define OPS_ACC11(x, y, z) \
(x + xdim11_accelerate_kernel * (y) + \
xdim11_accelerate_kernel * ydim11_accelerate_kernel * (z))
#define OPS_ACC12(x, y, z) \
(x + xdim12_accelerate_kernel * (y) + \
xdim12_accelerate_kernel * ydim12_accelerate_kernel * (z))
#define OPS_ACC13(x, y, z) \
(x + xdim13_accelerate_kernel * (y) + \
xdim13_accelerate_kernel * ydim13_accelerate_kernel * (z))
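// A brief worked example of the indexing macros above (values are illustrative):
// OPS_ACC0(x, y, z) linearizes a relative 3D offset into the flat arg0 array, so
//   OPS_ACC0(1, 0, 0)  == 1
//   OPS_ACC0(0, 1, 0)  == xdim0_accelerate_kernel
//   OPS_ACC0(0, 0, -1) == -xdim0_accelerate_kernel * ydim0_accelerate_kernel
// i.e. a unit step in y moves one full x-row and a unit step in z moves one full x-y plane.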
// user function
__device__
void
accelerate_kernel_gpu(const double *density0, const double *volume,
double *stepbymass, const double *xvel0,
double *xvel1, const double *xarea,
const double *pressure, const double *yvel0,
double *yvel1, const double *yarea,
const double *viscosity, const double *zvel0,
double *zvel1, const double *zarea) {
double nodal_mass = 0.0;
nodal_mass = (density0[OPS_ACC0(-1, -1, 0)] * volume[OPS_ACC1(-1, -1, 0)] +
density0[OPS_ACC0(0, -1, 0)] * volume[OPS_ACC1(0, -1, 0)] +
density0[OPS_ACC0(0, 0, 0)] * volume[OPS_ACC1(0, 0, 0)] +
density0[OPS_ACC0(-1, 0, 0)] * volume[OPS_ACC1(-1, 0, 0)] +
density0[OPS_ACC0(-1, -1, -1)] * volume[OPS_ACC1(-1, -1, -1)] +
density0[OPS_ACC0(0, -1, -1)] * volume[OPS_ACC1(0, -1, -1)] +
density0[OPS_ACC0(0, 0, -1)] * volume[OPS_ACC1(0, 0, -1)] +
density0[OPS_ACC0(-1, 0, -1)] * volume[OPS_ACC1(-1, 0, -1)]) *
0.125;
stepbymass[OPS_ACC2(0, 0, 0)] = 0.25 * dt / nodal_mass;
xvel1[OPS_ACC4(0, 0, 0)] =
xvel0[OPS_ACC3(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(xarea[OPS_ACC5(0, 0, 0)] *
(pressure[OPS_ACC6(0, 0, 0)] - pressure[OPS_ACC6(-1, 0, 0)]) +
xarea[OPS_ACC5(0, -1, 0)] *
(pressure[OPS_ACC6(0, -1, 0)] - pressure[OPS_ACC6(-1, -1, 0)]) +
xarea[OPS_ACC5(0, 0, -1)] *
(pressure[OPS_ACC6(0, 0, -1)] - pressure[OPS_ACC6(-1, 0, -1)]) +
xarea[OPS_ACC5(0, -1, -1)] * (pressure[OPS_ACC6(0, -1, -1)] -
pressure[OPS_ACC6(-1, -1, -1)]));
yvel1[OPS_ACC8(0, 0, 0)] =
yvel0[OPS_ACC7(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(yarea[OPS_ACC9(0, 0, 0)] *
(pressure[OPS_ACC6(0, 0, 0)] - pressure[OPS_ACC6(0, -1, 0)]) +
yarea[OPS_ACC9(-1, 0, 0)] *
(pressure[OPS_ACC6(-1, 0, 0)] - pressure[OPS_ACC6(-1, -1, 0)]) +
yarea[OPS_ACC9(0, 0, -1)] *
(pressure[OPS_ACC6(0, 0, -1)] - pressure[OPS_ACC6(0, -1, -1)]) +
yarea[OPS_ACC9(-1, 0, -1)] * (pressure[OPS_ACC6(-1, 0, -1)] -
pressure[OPS_ACC6(-1, -1, -1)]));
zvel1[OPS_ACC12(0, 0, 0)] =
zvel0[OPS_ACC11(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(zarea[OPS_ACC13(0, 0, 0)] *
(pressure[OPS_ACC6(0, 0, 0)] - pressure[OPS_ACC6(0, 0, -1)]) +
zarea[OPS_ACC13(0, -1, 0)] *
(pressure[OPS_ACC6(0, -1, 0)] - pressure[OPS_ACC6(0, -1, -1)]) +
zarea[OPS_ACC13(-1, 0, 0)] *
(pressure[OPS_ACC6(-1, 0, 0)] - pressure[OPS_ACC6(-1, 0, -1)]) +
zarea[OPS_ACC13(-1, -1, 0)] * (pressure[OPS_ACC6(-1, -1, 0)] -
pressure[OPS_ACC6(-1, -1, -1)]));
xvel1[OPS_ACC4(0, 0, 0)] =
xvel1[OPS_ACC4(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(xarea[OPS_ACC5(0, 0, 0)] * (viscosity[OPS_ACC10(0, 0, 0)] -
viscosity[OPS_ACC10(-1, 0, 0)]) +
xarea[OPS_ACC5(0, -1, 0)] * (viscosity[OPS_ACC10(0, -1, 0)] -
viscosity[OPS_ACC10(-1, -1, 0)]) +
xarea[OPS_ACC5(0, 0, -1)] * (viscosity[OPS_ACC10(0, 0, -1)] -
viscosity[OPS_ACC10(-1, 0, -1)]) +
xarea[OPS_ACC5(0, -1, -1)] * (viscosity[OPS_ACC10(0, -1, -1)] -
viscosity[OPS_ACC10(-1, -1, -1)]));
yvel1[OPS_ACC8(0, 0, 0)] =
yvel1[OPS_ACC8(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(yarea[OPS_ACC9(0, 0, 0)] * (viscosity[OPS_ACC10(0, 0, 0)] -
viscosity[OPS_ACC10(0, -1, 0)]) +
yarea[OPS_ACC9(-1, 0, 0)] * (viscosity[OPS_ACC10(-1, 0, 0)] -
viscosity[OPS_ACC10(-1, -1, 0)]) +
yarea[OPS_ACC9(0, 0, -1)] * (viscosity[OPS_ACC10(0, 0, -1)] -
viscosity[OPS_ACC10(0, -1, -1)]) +
yarea[OPS_ACC9(-1, 0, -1)] * (viscosity[OPS_ACC10(-1, 0, -1)] -
viscosity[OPS_ACC10(-1, -1, -1)]));
zvel1[OPS_ACC12(0, 0, 0)] =
zvel1[OPS_ACC12(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(zarea[OPS_ACC13(0, 0, 0)] * (viscosity[OPS_ACC10(0, 0, 0)] -
viscosity[OPS_ACC10(0, 0, -1)]) +
zarea[OPS_ACC13(0, -1, 0)] * (viscosity[OPS_ACC10(0, -1, 0)] -
viscosity[OPS_ACC10(0, -1, -1)]) +
zarea[OPS_ACC13(-1, 0, 0)] * (viscosity[OPS_ACC10(-1, 0, 0)] -
viscosity[OPS_ACC10(-1, 0, -1)]) +
zarea[OPS_ACC13(-1, -1, 0)] * (viscosity[OPS_ACC10(-1, -1, 0)] -
viscosity[OPS_ACC10(-1, -1, -1)]));
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#undef OPS_ACC11
#undef OPS_ACC12
#undef OPS_ACC13
__global__ void ops_accelerate_kernel(
const double *__restrict arg0, const double *__restrict arg1,
double *__restrict arg2, const double *__restrict arg3,
double *__restrict arg4, const double *__restrict arg5,
const double *__restrict arg6, const double *__restrict arg7,
double *__restrict arg8, const double *__restrict arg9,
const double *__restrict arg10, const double *__restrict arg11,
double *__restrict arg12, const double *__restrict arg13, int size0,
int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_accelerate_kernel +
idx_z * 1 * 1 * xdim0_accelerate_kernel * ydim0_accelerate_kernel;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_accelerate_kernel +
idx_z * 1 * 1 * xdim1_accelerate_kernel * ydim1_accelerate_kernel;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_accelerate_kernel +
idx_z * 1 * 1 * xdim2_accelerate_kernel * ydim2_accelerate_kernel;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_accelerate_kernel +
idx_z * 1 * 1 * xdim3_accelerate_kernel * ydim3_accelerate_kernel;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_accelerate_kernel +
idx_z * 1 * 1 * xdim4_accelerate_kernel * ydim4_accelerate_kernel;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_accelerate_kernel +
idx_z * 1 * 1 * xdim5_accelerate_kernel * ydim5_accelerate_kernel;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_accelerate_kernel +
idx_z * 1 * 1 * xdim6_accelerate_kernel * ydim6_accelerate_kernel;
arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_accelerate_kernel +
idx_z * 1 * 1 * xdim7_accelerate_kernel * ydim7_accelerate_kernel;
arg8 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim8_accelerate_kernel +
idx_z * 1 * 1 * xdim8_accelerate_kernel * ydim8_accelerate_kernel;
arg9 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim9_accelerate_kernel +
idx_z * 1 * 1 * xdim9_accelerate_kernel * ydim9_accelerate_kernel;
arg10 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim10_accelerate_kernel +
idx_z * 1 * 1 * xdim10_accelerate_kernel * ydim10_accelerate_kernel;
arg11 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim11_accelerate_kernel +
idx_z * 1 * 1 * xdim11_accelerate_kernel * ydim11_accelerate_kernel;
arg12 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim12_accelerate_kernel +
idx_z * 1 * 1 * xdim12_accelerate_kernel * ydim12_accelerate_kernel;
arg13 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim13_accelerate_kernel +
idx_z * 1 * 1 * xdim13_accelerate_kernel * ydim13_accelerate_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
accelerate_kernel_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8,
arg9, arg10, arg11, arg12, arg13);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_accelerate_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6, ops_arg arg7,
ops_arg arg8, ops_arg arg9, ops_arg arg10,
ops_arg arg11, ops_arg arg12,
ops_arg arg13) {
#else
void ops_par_loop_accelerate_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
ops_arg arg11 = desc->args[11];
ops_arg arg12 = desc->args[12];
ops_arg arg13 = desc->args[13];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[14] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6,
arg7, arg8, arg9, arg10, arg11, arg12, arg13};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 14, range, 104))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(104, "accelerate_kernel");
OPS_kernels[104].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
int xdim11 = args[11].dat->size[0];
int ydim11 = args[11].dat->size[1];
int xdim12 = args[12].dat->size[0];
int ydim12 = args[12].dat->size[1];
int xdim13 = args[13].dat->size[0];
int ydim13 = args[13].dat->size[1];
if (xdim0 != xdim0_accelerate_kernel_h ||
ydim0 != ydim0_accelerate_kernel_h ||
xdim1 != xdim1_accelerate_kernel_h ||
ydim1 != ydim1_accelerate_kernel_h ||
xdim2 != xdim2_accelerate_kernel_h ||
ydim2 != ydim2_accelerate_kernel_h ||
xdim3 != xdim3_accelerate_kernel_h ||
ydim3 != ydim3_accelerate_kernel_h ||
xdim4 != xdim4_accelerate_kernel_h ||
ydim4 != ydim4_accelerate_kernel_h ||
xdim5 != xdim5_accelerate_kernel_h ||
ydim5 != ydim5_accelerate_kernel_h ||
xdim6 != xdim6_accelerate_kernel_h ||
ydim6 != ydim6_accelerate_kernel_h ||
xdim7 != xdim7_accelerate_kernel_h ||
ydim7 != ydim7_accelerate_kernel_h ||
xdim8 != xdim8_accelerate_kernel_h ||
ydim8 != ydim8_accelerate_kernel_h ||
xdim9 != xdim9_accelerate_kernel_h ||
ydim9 != ydim9_accelerate_kernel_h ||
xdim10 != xdim10_accelerate_kernel_h ||
ydim10 != ydim10_accelerate_kernel_h ||
xdim11 != xdim11_accelerate_kernel_h ||
ydim11 != ydim11_accelerate_kernel_h ||
xdim12 != xdim12_accelerate_kernel_h ||
ydim12 != ydim12_accelerate_kernel_h ||
xdim13 != xdim13_accelerate_kernel_h ||
ydim13 != ydim13_accelerate_kernel_h) {
hipMemcpyToSymbol(xdim0_accelerate_kernel, &xdim0, sizeof(int));
xdim0_accelerate_kernel_h = xdim0;
hipMemcpyToSymbol(ydim0_accelerate_kernel, &ydim0, sizeof(int));
ydim0_accelerate_kernel_h = ydim0;
hipMemcpyToSymbol(xdim1_accelerate_kernel, &xdim1, sizeof(int));
xdim1_accelerate_kernel_h = xdim1;
hipMemcpyToSymbol(ydim1_accelerate_kernel, &ydim1, sizeof(int));
ydim1_accelerate_kernel_h = ydim1;
hipMemcpyToSymbol(xdim2_accelerate_kernel, &xdim2, sizeof(int));
xdim2_accelerate_kernel_h = xdim2;
hipMemcpyToSymbol(ydim2_accelerate_kernel, &ydim2, sizeof(int));
ydim2_accelerate_kernel_h = ydim2;
hipMemcpyToSymbol(xdim3_accelerate_kernel, &xdim3, sizeof(int));
xdim3_accelerate_kernel_h = xdim3;
hipMemcpyToSymbol(ydim3_accelerate_kernel, &ydim3, sizeof(int));
ydim3_accelerate_kernel_h = ydim3;
hipMemcpyToSymbol(xdim4_accelerate_kernel, &xdim4, sizeof(int));
xdim4_accelerate_kernel_h = xdim4;
hipMemcpyToSymbol(ydim4_accelerate_kernel, &ydim4, sizeof(int));
ydim4_accelerate_kernel_h = ydim4;
hipMemcpyToSymbol(xdim5_accelerate_kernel, &xdim5, sizeof(int));
xdim5_accelerate_kernel_h = xdim5;
hipMemcpyToSymbol(ydim5_accelerate_kernel, &ydim5, sizeof(int));
ydim5_accelerate_kernel_h = ydim5;
hipMemcpyToSymbol(xdim6_accelerate_kernel, &xdim6, sizeof(int));
xdim6_accelerate_kernel_h = xdim6;
hipMemcpyToSymbol(ydim6_accelerate_kernel, &ydim6, sizeof(int));
ydim6_accelerate_kernel_h = ydim6;
hipMemcpyToSymbol(xdim7_accelerate_kernel, &xdim7, sizeof(int));
xdim7_accelerate_kernel_h = xdim7;
hipMemcpyToSymbol(ydim7_accelerate_kernel, &ydim7, sizeof(int));
ydim7_accelerate_kernel_h = ydim7;
hipMemcpyToSymbol(xdim8_accelerate_kernel, &xdim8, sizeof(int));
xdim8_accelerate_kernel_h = xdim8;
hipMemcpyToSymbol(ydim8_accelerate_kernel, &ydim8, sizeof(int));
ydim8_accelerate_kernel_h = ydim8;
hipMemcpyToSymbol(xdim9_accelerate_kernel, &xdim9, sizeof(int));
xdim9_accelerate_kernel_h = xdim9;
hipMemcpyToSymbol(ydim9_accelerate_kernel, &ydim9, sizeof(int));
ydim9_accelerate_kernel_h = ydim9;
hipMemcpyToSymbol(xdim10_accelerate_kernel, &xdim10, sizeof(int));
xdim10_accelerate_kernel_h = xdim10;
hipMemcpyToSymbol(ydim10_accelerate_kernel, &ydim10, sizeof(int));
ydim10_accelerate_kernel_h = ydim10;
hipMemcpyToSymbol(xdim11_accelerate_kernel, &xdim11, sizeof(int));
xdim11_accelerate_kernel_h = xdim11;
hipMemcpyToSymbol(ydim11_accelerate_kernel, &ydim11, sizeof(int));
ydim11_accelerate_kernel_h = ydim11;
hipMemcpyToSymbol(xdim12_accelerate_kernel, &xdim12, sizeof(int));
xdim12_accelerate_kernel_h = xdim12;
hipMemcpyToSymbol(ydim12_accelerate_kernel, &ydim12, sizeof(int));
ydim12_accelerate_kernel_h = ydim12;
hipMemcpyToSymbol(xdim13_accelerate_kernel, &xdim13, sizeof(int));
xdim13_accelerate_kernel_h = xdim13;
hipMemcpyToSymbol(ydim13_accelerate_kernel, &ydim13, sizeof(int));
ydim13_accelerate_kernel_h = ydim13;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
int dat11 = (OPS_soa ? args[11].dat->type_size : args[11].dat->elem_size);
int dat12 = (OPS_soa ? args[12].dat->type_size : args[12].dat->elem_size);
int dat13 = (OPS_soa ? args[13].dat->type_size : args[13].dat->elem_size);
char *p_a[14];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7 +
dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1]);
base7 = base7 +
dat7 * args[7].dat->size[0] * args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8 +
dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1]);
base8 = base8 +
dat8 * args[8].dat->size[0] * args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9 +
dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1]);
base9 = base9 +
dat9 * args[9].dat->size[0] * args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 =
base10 +
dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1]);
base10 = base10 +
dat10 * args[10].dat->size[0] * args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
int base11 = args[11].dat->base_offset +
dat11 * 1 * (start[0] * args[11].stencil->stride[0]);
base11 =
base11 +
dat11 * args[11].dat->size[0] * (start[1] * args[11].stencil->stride[1]);
base11 = base11 +
dat11 * args[11].dat->size[0] * args[11].dat->size[1] *
(start[2] * args[11].stencil->stride[2]);
p_a[11] = (char *)args[11].data_d + base11;
int base12 = args[12].dat->base_offset +
dat12 * 1 * (start[0] * args[12].stencil->stride[0]);
base12 =
base12 +
dat12 * args[12].dat->size[0] * (start[1] * args[12].stencil->stride[1]);
base12 = base12 +
dat12 * args[12].dat->size[0] * args[12].dat->size[1] *
(start[2] * args[12].stencil->stride[2]);
p_a[12] = (char *)args[12].data_d + base12;
int base13 = args[13].dat->base_offset +
dat13 * 1 * (start[0] * args[13].stencil->stride[0]);
base13 =
base13 +
dat13 * args[13].dat->size[0] * (start[1] * args[13].stencil->stride[1]);
base13 = base13 +
dat13 * args[13].dat->size[0] * args[13].dat->size[1] *
(start[2] * args[13].stencil->stride[2]);
p_a[13] = (char *)args[13].data_d + base13;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 14);
ops_halo_exchanges(args, 14, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[104].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_accelerate_kernel), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9], (double *)p_a[10], (double *)p_a[11],
(double *)p_a[12], (double *)p_a[13], x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[104].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 14);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[8], range);
ops_set_halo_dirtybit3(&args[12], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[104].mpi_time += t2 - t1;
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg10);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg11);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg12);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg13);
}
}
#ifdef OPS_LAZY
void ops_par_loop_accelerate_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6, ops_arg arg7,
ops_arg arg8, ops_arg arg9, ops_arg arg10,
ops_arg arg11, ops_arg arg12,
ops_arg arg13) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 104;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 104;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 14;
desc->args = (ops_arg *)malloc(14 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
desc->args[11] = arg11;
desc->hash = ((desc->hash << 5) + desc->hash) + arg11.dat->index;
desc->args[12] = arg12;
desc->hash = ((desc->hash << 5) + desc->hash) + arg12.dat->index;
desc->args[13] = arg13;
desc->hash = ((desc->hash << 5) + desc->hash) + arg13.dat->index;
desc->function = ops_par_loop_accelerate_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(104, "accelerate_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
42ec1e8090f62d6a9fc7e4df990e0ec715cc2e8e.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_accelerate_kernel;
int xdim0_accelerate_kernel_h = -1;
__constant__ int ydim0_accelerate_kernel;
int ydim0_accelerate_kernel_h = -1;
__constant__ int xdim1_accelerate_kernel;
int xdim1_accelerate_kernel_h = -1;
__constant__ int ydim1_accelerate_kernel;
int ydim1_accelerate_kernel_h = -1;
__constant__ int xdim2_accelerate_kernel;
int xdim2_accelerate_kernel_h = -1;
__constant__ int ydim2_accelerate_kernel;
int ydim2_accelerate_kernel_h = -1;
__constant__ int xdim3_accelerate_kernel;
int xdim3_accelerate_kernel_h = -1;
__constant__ int ydim3_accelerate_kernel;
int ydim3_accelerate_kernel_h = -1;
__constant__ int xdim4_accelerate_kernel;
int xdim4_accelerate_kernel_h = -1;
__constant__ int ydim4_accelerate_kernel;
int ydim4_accelerate_kernel_h = -1;
__constant__ int xdim5_accelerate_kernel;
int xdim5_accelerate_kernel_h = -1;
__constant__ int ydim5_accelerate_kernel;
int ydim5_accelerate_kernel_h = -1;
__constant__ int xdim6_accelerate_kernel;
int xdim6_accelerate_kernel_h = -1;
__constant__ int ydim6_accelerate_kernel;
int ydim6_accelerate_kernel_h = -1;
__constant__ int xdim7_accelerate_kernel;
int xdim7_accelerate_kernel_h = -1;
__constant__ int ydim7_accelerate_kernel;
int ydim7_accelerate_kernel_h = -1;
__constant__ int xdim8_accelerate_kernel;
int xdim8_accelerate_kernel_h = -1;
__constant__ int ydim8_accelerate_kernel;
int ydim8_accelerate_kernel_h = -1;
__constant__ int xdim9_accelerate_kernel;
int xdim9_accelerate_kernel_h = -1;
__constant__ int ydim9_accelerate_kernel;
int ydim9_accelerate_kernel_h = -1;
__constant__ int xdim10_accelerate_kernel;
int xdim10_accelerate_kernel_h = -1;
__constant__ int ydim10_accelerate_kernel;
int ydim10_accelerate_kernel_h = -1;
__constant__ int xdim11_accelerate_kernel;
int xdim11_accelerate_kernel_h = -1;
__constant__ int ydim11_accelerate_kernel;
int ydim11_accelerate_kernel_h = -1;
__constant__ int xdim12_accelerate_kernel;
int xdim12_accelerate_kernel_h = -1;
__constant__ int ydim12_accelerate_kernel;
int ydim12_accelerate_kernel_h = -1;
__constant__ int xdim13_accelerate_kernel;
int xdim13_accelerate_kernel_h = -1;
__constant__ int ydim13_accelerate_kernel;
int ydim13_accelerate_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#undef OPS_ACC11
#undef OPS_ACC12
#undef OPS_ACC13
#define OPS_ACC0(x, y, z) \
(x + xdim0_accelerate_kernel * (y) + \
xdim0_accelerate_kernel * ydim0_accelerate_kernel * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_accelerate_kernel * (y) + \
xdim1_accelerate_kernel * ydim1_accelerate_kernel * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_accelerate_kernel * (y) + \
xdim2_accelerate_kernel * ydim2_accelerate_kernel * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_accelerate_kernel * (y) + \
xdim3_accelerate_kernel * ydim3_accelerate_kernel * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_accelerate_kernel * (y) + \
xdim4_accelerate_kernel * ydim4_accelerate_kernel * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_accelerate_kernel * (y) + \
xdim5_accelerate_kernel * ydim5_accelerate_kernel * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_accelerate_kernel * (y) + \
xdim6_accelerate_kernel * ydim6_accelerate_kernel * (z))
#define OPS_ACC7(x, y, z) \
(x + xdim7_accelerate_kernel * (y) + \
xdim7_accelerate_kernel * ydim7_accelerate_kernel * (z))
#define OPS_ACC8(x, y, z) \
(x + xdim8_accelerate_kernel * (y) + \
xdim8_accelerate_kernel * ydim8_accelerate_kernel * (z))
#define OPS_ACC9(x, y, z) \
(x + xdim9_accelerate_kernel * (y) + \
xdim9_accelerate_kernel * ydim9_accelerate_kernel * (z))
#define OPS_ACC10(x, y, z) \
(x + xdim10_accelerate_kernel * (y) + \
xdim10_accelerate_kernel * ydim10_accelerate_kernel * (z))
#define OPS_ACC11(x, y, z) \
(x + xdim11_accelerate_kernel * (y) + \
xdim11_accelerate_kernel * ydim11_accelerate_kernel * (z))
#define OPS_ACC12(x, y, z) \
(x + xdim12_accelerate_kernel * (y) + \
xdim12_accelerate_kernel * ydim12_accelerate_kernel * (z))
#define OPS_ACC13(x, y, z) \
(x + xdim13_accelerate_kernel * (y) + \
xdim13_accelerate_kernel * ydim13_accelerate_kernel * (z))
// user function
__device__
void
accelerate_kernel_gpu(const double *density0, const double *volume,
double *stepbymass, const double *xvel0,
double *xvel1, const double *xarea,
const double *pressure, const double *yvel0,
double *yvel1, const double *yarea,
const double *viscosity, const double *zvel0,
double *zvel1, const double *zarea) {
double nodal_mass = 0.0;
nodal_mass = (density0[OPS_ACC0(-1, -1, 0)] * volume[OPS_ACC1(-1, -1, 0)] +
density0[OPS_ACC0(0, -1, 0)] * volume[OPS_ACC1(0, -1, 0)] +
density0[OPS_ACC0(0, 0, 0)] * volume[OPS_ACC1(0, 0, 0)] +
density0[OPS_ACC0(-1, 0, 0)] * volume[OPS_ACC1(-1, 0, 0)] +
density0[OPS_ACC0(-1, -1, -1)] * volume[OPS_ACC1(-1, -1, -1)] +
density0[OPS_ACC0(0, -1, -1)] * volume[OPS_ACC1(0, -1, -1)] +
density0[OPS_ACC0(0, 0, -1)] * volume[OPS_ACC1(0, 0, -1)] +
density0[OPS_ACC0(-1, 0, -1)] * volume[OPS_ACC1(-1, 0, -1)]) *
0.125;
stepbymass[OPS_ACC2(0, 0, 0)] = 0.25 * dt / nodal_mass;
xvel1[OPS_ACC4(0, 0, 0)] =
xvel0[OPS_ACC3(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(xarea[OPS_ACC5(0, 0, 0)] *
(pressure[OPS_ACC6(0, 0, 0)] - pressure[OPS_ACC6(-1, 0, 0)]) +
xarea[OPS_ACC5(0, -1, 0)] *
(pressure[OPS_ACC6(0, -1, 0)] - pressure[OPS_ACC6(-1, -1, 0)]) +
xarea[OPS_ACC5(0, 0, -1)] *
(pressure[OPS_ACC6(0, 0, -1)] - pressure[OPS_ACC6(-1, 0, -1)]) +
xarea[OPS_ACC5(0, -1, -1)] * (pressure[OPS_ACC6(0, -1, -1)] -
pressure[OPS_ACC6(-1, -1, -1)]));
yvel1[OPS_ACC8(0, 0, 0)] =
yvel0[OPS_ACC7(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(yarea[OPS_ACC9(0, 0, 0)] *
(pressure[OPS_ACC6(0, 0, 0)] - pressure[OPS_ACC6(0, -1, 0)]) +
yarea[OPS_ACC9(-1, 0, 0)] *
(pressure[OPS_ACC6(-1, 0, 0)] - pressure[OPS_ACC6(-1, -1, 0)]) +
yarea[OPS_ACC9(0, 0, -1)] *
(pressure[OPS_ACC6(0, 0, -1)] - pressure[OPS_ACC6(0, -1, -1)]) +
yarea[OPS_ACC9(-1, 0, -1)] * (pressure[OPS_ACC6(-1, 0, -1)] -
pressure[OPS_ACC6(-1, -1, -1)]));
zvel1[OPS_ACC12(0, 0, 0)] =
zvel0[OPS_ACC11(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(zarea[OPS_ACC13(0, 0, 0)] *
(pressure[OPS_ACC6(0, 0, 0)] - pressure[OPS_ACC6(0, 0, -1)]) +
zarea[OPS_ACC13(0, -1, 0)] *
(pressure[OPS_ACC6(0, -1, 0)] - pressure[OPS_ACC6(0, -1, -1)]) +
zarea[OPS_ACC13(-1, 0, 0)] *
(pressure[OPS_ACC6(-1, 0, 0)] - pressure[OPS_ACC6(-1, 0, -1)]) +
zarea[OPS_ACC13(-1, -1, 0)] * (pressure[OPS_ACC6(-1, -1, 0)] -
pressure[OPS_ACC6(-1, -1, -1)]));
xvel1[OPS_ACC4(0, 0, 0)] =
xvel1[OPS_ACC4(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(xarea[OPS_ACC5(0, 0, 0)] * (viscosity[OPS_ACC10(0, 0, 0)] -
viscosity[OPS_ACC10(-1, 0, 0)]) +
xarea[OPS_ACC5(0, -1, 0)] * (viscosity[OPS_ACC10(0, -1, 0)] -
viscosity[OPS_ACC10(-1, -1, 0)]) +
xarea[OPS_ACC5(0, 0, -1)] * (viscosity[OPS_ACC10(0, 0, -1)] -
viscosity[OPS_ACC10(-1, 0, -1)]) +
xarea[OPS_ACC5(0, -1, -1)] * (viscosity[OPS_ACC10(0, -1, -1)] -
viscosity[OPS_ACC10(-1, -1, -1)]));
yvel1[OPS_ACC8(0, 0, 0)] =
yvel1[OPS_ACC8(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(yarea[OPS_ACC9(0, 0, 0)] * (viscosity[OPS_ACC10(0, 0, 0)] -
viscosity[OPS_ACC10(0, -1, 0)]) +
yarea[OPS_ACC9(-1, 0, 0)] * (viscosity[OPS_ACC10(-1, 0, 0)] -
viscosity[OPS_ACC10(-1, -1, 0)]) +
yarea[OPS_ACC9(0, 0, -1)] * (viscosity[OPS_ACC10(0, 0, -1)] -
viscosity[OPS_ACC10(0, -1, -1)]) +
yarea[OPS_ACC9(-1, 0, -1)] * (viscosity[OPS_ACC10(-1, 0, -1)] -
viscosity[OPS_ACC10(-1, -1, -1)]));
zvel1[OPS_ACC12(0, 0, 0)] =
zvel1[OPS_ACC12(0, 0, 0)] -
stepbymass[OPS_ACC2(0, 0, 0)] *
(zarea[OPS_ACC13(0, 0, 0)] * (viscosity[OPS_ACC10(0, 0, 0)] -
viscosity[OPS_ACC10(0, 0, -1)]) +
zarea[OPS_ACC13(0, -1, 0)] * (viscosity[OPS_ACC10(0, -1, 0)] -
viscosity[OPS_ACC10(0, -1, -1)]) +
zarea[OPS_ACC13(-1, 0, 0)] * (viscosity[OPS_ACC10(-1, 0, 0)] -
viscosity[OPS_ACC10(-1, 0, -1)]) +
zarea[OPS_ACC13(-1, -1, 0)] * (viscosity[OPS_ACC10(-1, -1, 0)] -
viscosity[OPS_ACC10(-1, -1, -1)]));
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#undef OPS_ACC11
#undef OPS_ACC12
#undef OPS_ACC13
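// wrapper kernel: one thread per (x,y,z) grid point; offsets each argument pointer to that thread's element and calls the user function inside the bounds check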
__global__ void ops_accelerate_kernel(
const double *__restrict arg0, const double *__restrict arg1,
double *__restrict arg2, const double *__restrict arg3,
double *__restrict arg4, const double *__restrict arg5,
const double *__restrict arg6, const double *__restrict arg7,
double *__restrict arg8, const double *__restrict arg9,
const double *__restrict arg10, const double *__restrict arg11,
double *__restrict arg12, const double *__restrict arg13, int size0,
int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_accelerate_kernel +
idx_z * 1 * 1 * xdim0_accelerate_kernel * ydim0_accelerate_kernel;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_accelerate_kernel +
idx_z * 1 * 1 * xdim1_accelerate_kernel * ydim1_accelerate_kernel;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_accelerate_kernel +
idx_z * 1 * 1 * xdim2_accelerate_kernel * ydim2_accelerate_kernel;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_accelerate_kernel +
idx_z * 1 * 1 * xdim3_accelerate_kernel * ydim3_accelerate_kernel;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_accelerate_kernel +
idx_z * 1 * 1 * xdim4_accelerate_kernel * ydim4_accelerate_kernel;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_accelerate_kernel +
idx_z * 1 * 1 * xdim5_accelerate_kernel * ydim5_accelerate_kernel;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_accelerate_kernel +
idx_z * 1 * 1 * xdim6_accelerate_kernel * ydim6_accelerate_kernel;
arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_accelerate_kernel +
idx_z * 1 * 1 * xdim7_accelerate_kernel * ydim7_accelerate_kernel;
arg8 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim8_accelerate_kernel +
idx_z * 1 * 1 * xdim8_accelerate_kernel * ydim8_accelerate_kernel;
arg9 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim9_accelerate_kernel +
idx_z * 1 * 1 * xdim9_accelerate_kernel * ydim9_accelerate_kernel;
arg10 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim10_accelerate_kernel +
idx_z * 1 * 1 * xdim10_accelerate_kernel * ydim10_accelerate_kernel;
arg11 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim11_accelerate_kernel +
idx_z * 1 * 1 * xdim11_accelerate_kernel * ydim11_accelerate_kernel;
arg12 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim12_accelerate_kernel +
idx_z * 1 * 1 * xdim12_accelerate_kernel * ydim12_accelerate_kernel;
arg13 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim13_accelerate_kernel +
idx_z * 1 * 1 * xdim13_accelerate_kernel * ydim13_accelerate_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
accelerate_kernel_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8,
arg9, arg10, arg11, arg12, arg13);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_accelerate_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6, ops_arg arg7,
ops_arg arg8, ops_arg arg9, ops_arg arg10,
ops_arg arg11, ops_arg arg12,
ops_arg arg13) {
#else
void ops_par_loop_accelerate_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
ops_arg arg11 = desc->args[11];
ops_arg arg12 = desc->args[12];
ops_arg arg13 = desc->args[13];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[14] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6,
arg7, arg8, arg9, arg10, arg11, arg12, arg13};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 14, range, 104))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(104, "accelerate_kernel");
OPS_kernels[104].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
int xdim11 = args[11].dat->size[0];
int ydim11 = args[11].dat->size[1];
int xdim12 = args[12].dat->size[0];
int ydim12 = args[12].dat->size[1];
int xdim13 = args[13].dat->size[0];
int ydim13 = args[13].dat->size[1];
if (xdim0 != xdim0_accelerate_kernel_h ||
ydim0 != ydim0_accelerate_kernel_h ||
xdim1 != xdim1_accelerate_kernel_h ||
ydim1 != ydim1_accelerate_kernel_h ||
xdim2 != xdim2_accelerate_kernel_h ||
ydim2 != ydim2_accelerate_kernel_h ||
xdim3 != xdim3_accelerate_kernel_h ||
ydim3 != ydim3_accelerate_kernel_h ||
xdim4 != xdim4_accelerate_kernel_h ||
ydim4 != ydim4_accelerate_kernel_h ||
xdim5 != xdim5_accelerate_kernel_h ||
ydim5 != ydim5_accelerate_kernel_h ||
xdim6 != xdim6_accelerate_kernel_h ||
ydim6 != ydim6_accelerate_kernel_h ||
xdim7 != xdim7_accelerate_kernel_h ||
ydim7 != ydim7_accelerate_kernel_h ||
xdim8 != xdim8_accelerate_kernel_h ||
ydim8 != ydim8_accelerate_kernel_h ||
xdim9 != xdim9_accelerate_kernel_h ||
ydim9 != ydim9_accelerate_kernel_h ||
xdim10 != xdim10_accelerate_kernel_h ||
ydim10 != ydim10_accelerate_kernel_h ||
xdim11 != xdim11_accelerate_kernel_h ||
ydim11 != ydim11_accelerate_kernel_h ||
xdim12 != xdim12_accelerate_kernel_h ||
ydim12 != ydim12_accelerate_kernel_h ||
xdim13 != xdim13_accelerate_kernel_h ||
ydim13 != ydim13_accelerate_kernel_h) {
cudaMemcpyToSymbol(xdim0_accelerate_kernel, &xdim0, sizeof(int));
xdim0_accelerate_kernel_h = xdim0;
cudaMemcpyToSymbol(ydim0_accelerate_kernel, &ydim0, sizeof(int));
ydim0_accelerate_kernel_h = ydim0;
cudaMemcpyToSymbol(xdim1_accelerate_kernel, &xdim1, sizeof(int));
xdim1_accelerate_kernel_h = xdim1;
cudaMemcpyToSymbol(ydim1_accelerate_kernel, &ydim1, sizeof(int));
ydim1_accelerate_kernel_h = ydim1;
cudaMemcpyToSymbol(xdim2_accelerate_kernel, &xdim2, sizeof(int));
xdim2_accelerate_kernel_h = xdim2;
cudaMemcpyToSymbol(ydim2_accelerate_kernel, &ydim2, sizeof(int));
ydim2_accelerate_kernel_h = ydim2;
cudaMemcpyToSymbol(xdim3_accelerate_kernel, &xdim3, sizeof(int));
xdim3_accelerate_kernel_h = xdim3;
cudaMemcpyToSymbol(ydim3_accelerate_kernel, &ydim3, sizeof(int));
ydim3_accelerate_kernel_h = ydim3;
cudaMemcpyToSymbol(xdim4_accelerate_kernel, &xdim4, sizeof(int));
xdim4_accelerate_kernel_h = xdim4;
cudaMemcpyToSymbol(ydim4_accelerate_kernel, &ydim4, sizeof(int));
ydim4_accelerate_kernel_h = ydim4;
cudaMemcpyToSymbol(xdim5_accelerate_kernel, &xdim5, sizeof(int));
xdim5_accelerate_kernel_h = xdim5;
cudaMemcpyToSymbol(ydim5_accelerate_kernel, &ydim5, sizeof(int));
ydim5_accelerate_kernel_h = ydim5;
cudaMemcpyToSymbol(xdim6_accelerate_kernel, &xdim6, sizeof(int));
xdim6_accelerate_kernel_h = xdim6;
cudaMemcpyToSymbol(ydim6_accelerate_kernel, &ydim6, sizeof(int));
ydim6_accelerate_kernel_h = ydim6;
cudaMemcpyToSymbol(xdim7_accelerate_kernel, &xdim7, sizeof(int));
xdim7_accelerate_kernel_h = xdim7;
cudaMemcpyToSymbol(ydim7_accelerate_kernel, &ydim7, sizeof(int));
ydim7_accelerate_kernel_h = ydim7;
cudaMemcpyToSymbol(xdim8_accelerate_kernel, &xdim8, sizeof(int));
xdim8_accelerate_kernel_h = xdim8;
cudaMemcpyToSymbol(ydim8_accelerate_kernel, &ydim8, sizeof(int));
ydim8_accelerate_kernel_h = ydim8;
cudaMemcpyToSymbol(xdim9_accelerate_kernel, &xdim9, sizeof(int));
xdim9_accelerate_kernel_h = xdim9;
cudaMemcpyToSymbol(ydim9_accelerate_kernel, &ydim9, sizeof(int));
ydim9_accelerate_kernel_h = ydim9;
cudaMemcpyToSymbol(xdim10_accelerate_kernel, &xdim10, sizeof(int));
xdim10_accelerate_kernel_h = xdim10;
cudaMemcpyToSymbol(ydim10_accelerate_kernel, &ydim10, sizeof(int));
ydim10_accelerate_kernel_h = ydim10;
cudaMemcpyToSymbol(xdim11_accelerate_kernel, &xdim11, sizeof(int));
xdim11_accelerate_kernel_h = xdim11;
cudaMemcpyToSymbol(ydim11_accelerate_kernel, &ydim11, sizeof(int));
ydim11_accelerate_kernel_h = ydim11;
cudaMemcpyToSymbol(xdim12_accelerate_kernel, &xdim12, sizeof(int));
xdim12_accelerate_kernel_h = xdim12;
cudaMemcpyToSymbol(ydim12_accelerate_kernel, &ydim12, sizeof(int));
ydim12_accelerate_kernel_h = ydim12;
cudaMemcpyToSymbol(xdim13_accelerate_kernel, &xdim13, sizeof(int));
xdim13_accelerate_kernel_h = xdim13;
cudaMemcpyToSymbol(ydim13_accelerate_kernel, &ydim13, sizeof(int));
ydim13_accelerate_kernel_h = ydim13;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
int dat11 = (OPS_soa ? args[11].dat->type_size : args[11].dat->elem_size);
int dat12 = (OPS_soa ? args[12].dat->type_size : args[12].dat->elem_size);
int dat13 = (OPS_soa ? args[13].dat->type_size : args[13].dat->elem_size);
char *p_a[14];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7 +
dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1]);
base7 = base7 +
dat7 * args[7].dat->size[0] * args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8 +
dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1]);
base8 = base8 +
dat8 * args[8].dat->size[0] * args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9 +
dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1]);
base9 = base9 +
dat9 * args[9].dat->size[0] * args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 =
base10 +
dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1]);
base10 = base10 +
dat10 * args[10].dat->size[0] * args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
int base11 = args[11].dat->base_offset +
dat11 * 1 * (start[0] * args[11].stencil->stride[0]);
base11 =
base11 +
dat11 * args[11].dat->size[0] * (start[1] * args[11].stencil->stride[1]);
base11 = base11 +
dat11 * args[11].dat->size[0] * args[11].dat->size[1] *
(start[2] * args[11].stencil->stride[2]);
p_a[11] = (char *)args[11].data_d + base11;
int base12 = args[12].dat->base_offset +
dat12 * 1 * (start[0] * args[12].stencil->stride[0]);
base12 =
base12 +
dat12 * args[12].dat->size[0] * (start[1] * args[12].stencil->stride[1]);
base12 = base12 +
dat12 * args[12].dat->size[0] * args[12].dat->size[1] *
(start[2] * args[12].stencil->stride[2]);
p_a[12] = (char *)args[12].data_d + base12;
int base13 = args[13].dat->base_offset +
dat13 * 1 * (start[0] * args[13].stencil->stride[0]);
base13 =
base13 +
dat13 * args[13].dat->size[0] * (start[1] * args[13].stencil->stride[1]);
base13 = base13 +
dat13 * args[13].dat->size[0] * args[13].dat->size[1] *
(start[2] * args[13].stencil->stride[2]);
p_a[13] = (char *)args[13].data_d + base13;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 14);
ops_halo_exchanges(args, 14, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[104].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_accelerate_kernel<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9], (double *)p_a[10], (double *)p_a[11],
(double *)p_a[12], (double *)p_a[13], x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[104].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 14);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[8], range);
ops_set_halo_dirtybit3(&args[12], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[104].mpi_time += t2 - t1;
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg10);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg11);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg12);
OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg13);
}
}
#ifdef OPS_LAZY
void ops_par_loop_accelerate_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6, ops_arg arg7,
ops_arg arg8, ops_arg arg9, ops_arg arg10,
ops_arg arg11, ops_arg arg12,
ops_arg arg13) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 104;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 104;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 14;
desc->args = (ops_arg *)malloc(14 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
desc->args[11] = arg11;
desc->hash = ((desc->hash << 5) + desc->hash) + arg11.dat->index;
desc->args[12] = arg12;
desc->hash = ((desc->hash << 5) + desc->hash) + arg12.dat->index;
desc->args[13] = arg13;
desc->hash = ((desc->hash << 5) + desc->hash) + arg13.dat->index;
desc->function = ops_par_loop_accelerate_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(104, "accelerate_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
5c121efcd9c034e62dc18c16b3e8a535296a9cd7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <map>
#include <deque>
#include <vector>
#include <cmath>
#include <sstream>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <sys/time.h>
#ifndef __HIPCC__
#define XCPU
#elif __CUDA_ARCH__ >= 120
#define USMA
#endif
#ifdef XCPU
#include <cmath>
#include <cstring>
#endif
using namespace std;
namespace xppc{
#include "ini.cxx"
#include "pro.cu"
void initialize(float enh = 1.f){
m.set();
d.eff *= enh;
}
unsigned int pmax, pmxo, pn;
#ifdef XCPU
dats *e; // pointer to a copy of "d" on device
int numBlock, threadPerBlock, ntot;
void ini(int type){
rs_ini();
pn=0;
ntot=numBlock*threadPerBlock;
pmax=ntot*NPHO;
pmxo=pmax/OVER;
pmax=pmxo*OVER;
d.hnum=pmax/HQUO;
{
d.hits = q.hits = new hit[d.hnum];
if(type==0) d.pz = q.pz = new photon[pmxo];
#ifdef TALL
d.bf = new pbuf[pmax];
#endif
}
{
d.z=&z; e=&d; oms=q.oms;
}
{
unsigned int size=d.rsize, need=seed+1;
if(size<need) cerr<<"Error: not enough multipliers: asked for "<<seed<<"-th out of "<<size<<"!"<<endl;
}
}
void fin(){
    if(d.type==0) delete [] d.pz;  // buffers were allocated with new[], release with delete[]
    delete [] d.hits;
#ifdef TALL
    delete [] d.bf;
#endif
}
#else
bool xgpu=false;
void checkError(hipError_t result){
if(result!=hipSuccess){
cerr<<"CUDA Error: "<<hipGetErrorString(result)<<endl;
exit(2);
}
}
struct gpu{
dats d;
dats *e; // pointer to a copy of "d" on device
int device;
int numBlock, threadPerBlock, ntot; // total threads in parallel
unsigned int npho, pmax, pmxo;
float dt, deviceTime, threadMin, threadMax;
hipDeviceProp_t prop;
hipStream_t stream;
hipEvent_t evt1, evt2;
unsigned int old, num;
gpu(int device) : deviceTime(0), threadMin(0), threadMax(0), old(0), npho(NPHO){
this->device=device;
{
ostringstream o; o<<"NPHO_"<<device;
char * nph=getenv(o.str().c_str());
if(nph==NULL) nph=getenv("NPHO");
if(nph!=NULL) if(*nph!=0){
npho=atoi(nph);
cerr<<"Setting NPHO="<<npho<<endl;
if(npho<=0){
cerr<<"Not using device # "<<device<<"!"<<endl;
return;
}
}
}
checkError(hipSetDevice(device));
checkError(hipGetDeviceProperties(&prop, device));
#if CUDART_VERSION >= 3000
checkError(hipFuncSetCacheConfig(propagate, hipFuncCachePreferL1));
#endif
hipFuncAttributes attr;
checkError(hipFuncGetAttributes (&attr, propagate));
numBlock = prop.multiProcessorCount;
threadPerBlock = attr.maxThreadsPerBlock;
// threadPerBlock = 512;
// Change copy process?
// numBlock = prop.multiProcessorCount * 2;
// threadPerBlock = 256;
cerr << "Running on " << numBlock << " blocks x " << threadPerBlock << " threads" << endl;
fprintf(stderr, "Kernel uses: l=%lu r=%d s=%lu c=%lu\n", (unsigned long)attr.localSizeBytes,
attr.numRegs, (unsigned long)attr.sharedSizeBytes, (unsigned long)attr.constSizeBytes);
}
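  // per-device setup: size the photon buffers to fit in global memory, create the stream and timing events, and upload constants, geometry and the device copy of "d"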
void ini(int type){
// init the random number generator
rs_ini();
d = xppc::d;
// Check BADMP
{
d.blockIdx = -1, d.gridDim=numBlock;
ostringstream o; o<<"BADMP_"<<device;
char * bmp=getenv(o.str().c_str());
if(bmp==NULL) bmp=getenv("BADMP");
if(bmp!=NULL) if(*bmp!=0){
d.blockIdx=atoi(bmp), d.gridDim--;
cerr<<"Not using MP #"<<d.blockIdx<<endl;
}
}
ntot = numBlock * threadPerBlock;
{
unsigned long xmem = prop.totalGlobalMem;
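      // halve the photons-per-thread budget (npho) until the estimated device buffers fit into global memory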
while(npho>1){
pmax = ntot * npho;
pmxo = pmax / OVER;
pmax = pmxo*OVER;
        d.hnum = pmax/HQUO; // save at most pmax/HQUO photon hits per launch
        unsigned long mtot = sizeof(datz) + sizeof(dats) + d.gsize * sizeof(DOM);
        mtot += d.hnum*sizeof(hit);
if(d.type==0) mtot += pmxo*sizeof(photon);
#ifdef TALL
mtot += pmax*sizeof(pbuf);
#endif
if(mtot > xmem) npho/=2; else break;
}
}
{
checkError(hipStreamCreate(&stream));
checkError(hipEventCreateWithFlags(&evt1, hipEventBlockingSync));
checkError(hipEventCreateWithFlags(&evt2, hipEventBlockingSync));
}
{
unsigned int size=d.rsize;
if(size<ntot) cerr<<"Error: not enough multipliers: only have "<<size<<" (need "<<ntot<<")!"<<endl;
else d.rsize=ntot;
}
unsigned long tot=0, cnt=0;
{
unsigned long size=sizeof(datz); tot+=size;
checkError(hipMalloc((void**) &d.z, size));
checkError(hipMemcpy(d.z, &z, size, hipMemcpyHostToDevice));
}
{
unsigned long size=d.hnum*sizeof(hit); tot+=size;
checkError(hipMalloc((void**) &d.hits, size));
}
if(d.type==0){
unsigned long size=pmxo*sizeof(photon); tot+=size;
checkError(hipMalloc((void**) &d.pz, size));
}
#ifdef TALL
{
unsigned long size=pmax*sizeof(pbuf); tot+=size;
checkError(hipMalloc((void**) &d.bf, size));
}
#endif
{
unsigned long size=d.gsize*sizeof(DOM); cnt+=size;
checkError(hipMemcpyToSymbol(oms, q.oms, size));
}
{
unsigned long size=sizeof(dats); tot+=size;
checkError(hipMalloc((void**) &e, size));
checkError(hipMemcpy(e, &d, size, hipMemcpyHostToDevice));
}
cerr << "Total GPU memory usage: "<< tot << " const: " << cnt << " (npho="<<npho<<")"<<endl;
}
void fin(){
checkError(hipFree(d.z));
checkError(hipFree(d.hits));
if(d.type==0) checkError(hipFree(d.pz));
#ifdef TALL
checkError(hipFree(d.bf));
#endif
checkError(hipFree(e));
checkError(hipEventDestroy(evt1));
checkError(hipEventDestroy(evt2));
checkError(hipStreamDestroy(stream));
// closeFile();
}
void set(){
if(xgpu) checkError(hipSetDevice(device));
}
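  // harvest the previous launch: copy the counters back, check for errors and overflow, accumulate in-kernel timing, and copy the produced hits to the host asynchronously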
void kernel_i(){
{
checkError(hipStreamSynchronize(stream));
checkError(hipMemcpy(&d, e, 7*sizeof(int), hipMemcpyDeviceToHost));
checkError(hipEventElapsedTime(&dt, evt1, evt2)); deviceTime+=dt;
if(d.ab>0){
cerr<<"Error: TOT was a nan or an inf "<<d.ab<<" times! Bad GPU "<<device<<" MP";
for(int i=0; i<min(d.ab, 4); i++) cerr<<" #"<<d.bmp[i]; cerr<<endl;
}
if(d.mp!=d.gridDim){ cerr<<"Error: did not encounter MP #"<<d.blockIdx<<endl; exit(4); }
if(threadMax!=-1){
if((unsigned long long)(dt*prop.clockRate)<0x100000000ULL){
threadMin+=d.tn/(float)prop.clockRate;
threadMax+=d.tx/(float)prop.clockRate;
}
else threadMin=-1, threadMax=-1;
}
if(d.hidx>=d.hnum){ d.hidx=d.hnum; cerr<<"Error: data buffer overflow occurred!"<<endl; }
}
{
unsigned int size=d.hidx*sizeof(hit);
checkError(hipMemcpyAsync(&q.hits[xppc::d.hidx], d.hits, size, hipMemcpyDeviceToHost, stream));
xppc::d.hidx+=d.hidx;
}
}
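  // asynchronously stage this device's share of photon sources for the next launch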
void kernel_c(unsigned int & idx){
if(old>0) checkError(hipStreamSynchronize(stream));
unsigned int pn=num/OVER;
unsigned int size=pn*sizeof(photon);
checkError(hipMemcpyAsync(d.pz, &q.pz[idx], size, hipMemcpyHostToDevice, stream));
idx+=pn;
}
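  // record evt1, run a single-thread propagate(e, 0) pass (per-launch setup on the device, presumably), then launch the full grid and record evt2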
void kernel_f(){
checkError(hipStreamSynchronize(stream));
if(num>0){
checkError(hipEventRecord(evt1, stream));
hipLaunchKernelGGL(( propagate), dim3(1), dim3(1), 0, stream , e, 0);
checkError(hipGetLastError());
// propagate<<< numBlock, threadPerBlock, 0, stream >>>(e, num);
hipLaunchKernelGGL(( propagate), dim3(numBlock), dim3(threadPerBlock), 0, stream , e, num);
checkError(hipGetLastError());
checkError(hipEventRecord(evt2, stream));
// checkError(hipEventSynchronize(evt2));
}
}
void stop(){
fprintf(stderr, "Device time: %2.1f (in-kernel: %2.1f...%2.1f) [ms]\n", deviceTime, threadMin, threadMax);
checkError(hipDeviceReset());
}
};
vector<gpu> gpus;
void ini(int type){
  // initialize every selected GPU and size the combined pinned host buffers for hits and photons
d.hnum=0;
pmax=0, pmxo=0, pn=0;
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++){
i->set();
i->ini(type); if(xgpu) sv++;
d.hnum+=i->d.hnum;
pmax+=i->pmax, pmxo+=i->pmxo;
}
{
unsigned long size=d.hnum*sizeof(hit);
checkError(hipHostMalloc((void**) &q.hits, size));
}
if(d.type==0){
unsigned long size=pmxo*sizeof(photon);
checkError(hipHostMalloc((void**) &q.pz, size));
}
}
void fin(){
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) i->set(), i->fin();
checkError(hipHostFree(q.hits));
if(d.type==0) checkError(hipHostFree(q.pz));
}
void listDevices(){
int deviceCount, driver, runtime;
hipGetDeviceCount(&deviceCount);
hipDriverGetVersion(&driver);
hipRuntimeGetVersion(&runtime);
fprintf(stderr, "Found %d devices, driver %d, runtime %d\n", deviceCount, driver, runtime);
for(int device=0; device<deviceCount; ++device){
hipDeviceProp_t prop; hipGetDeviceProperties(&prop, device);
fprintf(stderr, "%d(%d.%d): %s %g GHz G(%lu) S(%lu) C(%lu) R(%d) W(%d)\n"
"\tl%d o%d c%d h%d i%d m%d a%lu M(%lu) T(%d: %d,%d,%d) G(%d,%d,%d)\n",
device, prop.major, prop.minor, prop.name, prop.clockRate/1.e6,
(unsigned long)prop.totalGlobalMem, (unsigned long)prop.sharedMemPerBlock,
(unsigned long)prop.totalConstMem, prop.regsPerBlock, prop.warpSize,
prop.kernelExecTimeoutEnabled, prop.deviceOverlap, prop.computeMode,
prop.canMapHostMemory, prop.integrated, prop.multiProcessorCount,
(unsigned long)prop.textureAlignment,
(unsigned long)prop.memPitch, prop.maxThreadsPerBlock,
prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2],
prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
}
fprintf(stderr, "\n");
}
static unsigned int old=0;
#endif
void print();
void closeFile();
void setNameWithGeometry(int x, int y, int z);
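// per-batch driver: collect results of the previous launch from every GPU, apportion the new photon count across devices, stage the inputs and launch asynchronously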
void kernel(unsigned int num){
#ifdef XCPU
unsigned int & old = num;
#endif
if(old>0){
d.hidx=0;
#ifdef XCPU
for(d.blockIdx=0, d.gridDim=numBlock, blockDim.x=threadPerBlock; d.blockIdx<d.gridDim; d.blockIdx++)
for(threadIdx.x=0; threadIdx.x<blockDim.x; threadIdx.x++) propagate(e, num);
if(d.hidx>=d.hnum){ d.hidx=d.hnum; cerr<<"Error: data buffer overflow occurred!"<<endl; }
#else
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) {
i->set();
i->kernel_i();
}
#endif
#ifndef CAMERA
cerr<<"photons: "<<old<<" hits: "<<d.hidx<<endl;
#endif
}
#ifndef XCPU
{
unsigned int over=d.type == 0 ? OVER : 1, sum=0;
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++){
i->num=over*((num*(unsigned long long) i->pmax)/(over*(unsigned long long) pmax));
sum+=i->num;
}
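      // round-robin any remainder so the full requested photon count is assigned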
while(num>sum){
static int res=0;
gpu& g=gpus[res++%gpus.size()];
if(g.num<g.pmax) g.num+=over, sum+=over;
}
}
if(d.type==0){
unsigned int idx=0;
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) i->set(), i->kernel_c(idx);
}
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) i->set(), i->kernel_f();
#endif
if(old>0) print();
#ifndef XCPU
old=num;
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) i->old=i->num;
#endif
}
float square(float x){
return x*x;
}
  /*
    Initialize the flasher position and beam width (FWID).
  */
int flset(int str, int dom){
int type=1;
float r[3]={0, 0, 0};
if(str<0){ type=2; str=-str; }
if(str==0) switch(dom){
case 1: type=3; r[0]=544.07; r[1]=55.89; r[2]=136.86; break;
case 2: type=4; r[0]=11.87; r[1]=179.19; r[2]=-205.64; break;
}
else {
for(int n=0; n<d.gsize; n++) {
if(q.names[n].str==str && q.names[n].dom==dom){
d.fla=n;
for(int m=0; m<3; m++) r[m]=q.oms[n].r[m]; break;
}
}
}
for(int m=0; m<3; m++)
d.r[m]=r[m];
float fwid=9.7f;
{
char * FWID=getenv("FWID");
if(FWID!=NULL){ fwid=atof(FWID);
cerr<<"Setting flasher beam width to "<<fwid<<" degrees"<<endl;
}
}
if(fwid<0) d.ka=-1, d.up=0; else
switch(type){
case 1: d.ka=square(fcv*fwid); d.up=fcv*0.0f; break;
case 2: d.ka=square(fcv*fwid); d.up=fcv*48.f; break;
case 3: d.ka=0.0f; d.up=fcv*(90.0f-41.13f); break;
case 4: d.ka=0.0f; d.up=fcv*(41.13f-90.0f); break;
}
return type;
}
void flini(int str, int dom){
d.type = flset(str, dom);
ini(d.type);
}
#ifdef XLIB
const DOM& flget(int str, int dom){
static DOM om;
flset(str, dom); ini(0);
for(int m=0; m<3; m++) om.r[m]=d.r[m];
return om;
}
void flshift(float r[], float n[], float *m = NULL){
float sft[3]={0};
if(d.ka>0){
float FLZ, FLR;
sincosf(fcv*30.f, &FLZ, &FLR);
FLZ*=OMR, FLR*=OMR;
sft[0]+=FLR*n[0];
sft[1]+=FLR*n[1];
sft[2]+=FLZ;
r[3]+=OMR*d.ocv;
}
float xi;
sincosf(d.up, &n[2], &xi);
n[0]*=xi; n[1]*=xi;
if(m!=NULL){
float o[3]={0,0,1};
float r[3];
r[0]=m[1]*o[2]-m[2]*o[1]; // m[1]
r[1]=m[2]*o[0]-m[0]*o[2]; //-m[0]
r[2]=m[0]*o[1]-m[1]*o[0]; // 0
float norm=sqrt(r[0]*r[0]+r[1]*r[1]+r[2]*r[2]);
if(norm>0){
float cs=0;
for(int i=0; i<3; i++) r[i]/=norm, cs+=o[i]*m[i];
float sn=sin(acos(cs)); //norm
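      // Rodrigues' rotation built from axis r and cosine cs; it rotates n and the shift vector from the local z-up frame into the frame defined by m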
float R[3][3]={0};
for(int i=0; i<3; i++)
for(int j=0; j<3; j++)
R[i][j]=(i==j?cs:sn*r[3-i-j]*((j-i+3)%3==1?1:-1))+(1-cs)*r[i]*r[j];
float tmp[3];
for(int i=0; i<3; i++){
tmp[i]=0;
for(int j=0; j<3; j++) tmp[i]+=R[i][j]*n[j];
}
for(int i=0; i<3; i++) n[i]=tmp[i];
for(int i=0; i<3; i++){
tmp[i]=0;
for(int j=0; j<3; j++) tmp[i]+=R[i][j]*sft[j];
}
for(int i=0; i<3; i++) sft[i]=tmp[i];
}
}
for(int i=0; i<3; i++) r[i]+=sft[i];
}
#endif
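  // emit num photons (scaled by the efficiency d.eff) in chunks of at most pmax per kernel() call; the trailing kernel(0) drains the last asynchronous launch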
void flone(unsigned long long num){
for(long long i=llroundf(num*(long double)d.eff); i>0; i-=pmax) kernel(min(i, (long long) pmax));
#ifndef XCPU
kernel(0);
#endif
}
void flasher(int str, int dom, unsigned long long num, int itr){
flini(str, dom);
#ifdef CAMERA
setNameWithGeometry(x_diff, y_diff, z_diff);
#endif
for(int j=0; j<max(1, itr); j++){
iterNum = j;
flone(num);
// if(itr>0) printf("\n");
closeFile();
}
fin();
#ifdef CAMERA
#endif
}
void setOrder(int order){
photonOrder = order;
}
#ifdef XCPU
void start(){}
void stop(){}
void choose(int device){
sv+=device;
seed=device;
numBlock=numBlock, threadPerBlock=threadPerBlock;
}
void listDevices(){}
#else
  /*
    Before initializing the device runtime, set the scheduling flag so that
    host-side synchronization blocks (yields) instead of busy-waiting.
  */
void start(){
hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
}
void stop(){
fprintf(stderr, "\n");
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) i->set(), i->stop();
}
void choose(int device){
if(device<0){
int deviceCount; hipGetDeviceCount(&deviceCount);
for(int device=0; device<deviceCount; ++device){
gpus.push_back(gpu(device));
if(gpus.back().npho<=0) gpus.pop_back();
}
}
else{
sv+=device;
gpus.push_back(gpu(device));
if(gpus.back().npho<=0) gpus.pop_back();
}
if(gpus.size()<=0){
cerr<<"No active GPU(s) selected!"<<endl;
exit(5);
}
xgpu=gpus.size()>1;
}
#endif
#include "f2k.cxx"
}
#ifndef XLIB
using namespace xppc;
int main(int arg_c, char *arg_a[]){
start();
if(arg_c<=1){
listDevices();
fprintf(stderr, "Use: %s [device] (f2k muons)\n"
" %s [str] [om] [num] [device] (flasher)\n", arg_a[0], arg_a[0]);
}
else if(0==strcmp(arg_a[1], "-")){
initialize();
ices & w = z.w[WNUM/2];
cerr<<"For wavelength="<<w.wvl<<" [nm] np="<<(1/w.coschr)<<" cm="<<1/w.ocm<<" [m/ns]"<<endl;
#ifdef TILT
float4 r;
r.w=0;
if(arg_c==4){
r.x=atof(arg_a[2]);
r.y=atof(arg_a[3]);
}
else r.x=0, r.y=0;
#endif
for(int i=0; i<d.size; i++){
float z=d.hmin+d.dh*i;
#ifdef TILT
r.z=z; for(int j=0; j<10; j++) r.z=z+zshift(d, r); z=r.z;
#endif
cout<<z<<" "<<w.z[i].abs<<" "<<w.z[i].sca*(1-d.g)<<endl;
}
}
else if(arg_c<=2){
int device=0;
if(arg_c>1) device=atoi(arg_a[1]);
initialize();
choose(device);
fprintf(stderr, "Processing f2k muons from stdin on device %d\n", device);
f2k();
}
// Main operation for flasher
else{
int ledStr=0, ledDom=0, device=0, itr=0;
unsigned long long num=1000000ULL;
if(arg_c>1) ledStr=atoi(arg_a[1]);
if(arg_c>2) ledDom=atoi(arg_a[2]);
if(arg_c>3){
num=(unsigned long long) atof(arg_a[3]);
char * sub = strchr(arg_a[3], '*');
if(sub!=NULL) itr=(int) atof(++sub);
}
if(arg_c>4) device=atoi(arg_a[4]);
int order = log10(num);
// cout << order << endl;
initialize();
// listDevices();
setOrder(order);
choose(device);
fprintf(stderr, "Running flasher simulation on device %d\n", device);
flasher(ledStr, ledDom, num, itr);
}
stop();
}
#endif
|
5c121efcd9c034e62dc18c16b3e8a535296a9cd7.cu
|
#include <map>
#include <deque>
#include <vector>
#include <cmath>
#include <sstream>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <sys/time.h>
#ifndef __CUDACC__
#define XCPU
#elif __CUDA_ARCH__ >= 120
#define USMA
#endif
#ifdef XCPU
#include <cmath>
#include <cstring>
#endif
using namespace std;
namespace xppc{
#include "ini.cxx"
#include "pro.cu"
void initialize(float enh = 1.f){
m.set();
d.eff *= enh;
}
unsigned int pmax, pmxo, pn;
#ifdef XCPU
dats *e; // pointer to a copy of "d" on device
int numBlock, threadPerBlock, ntot;
void ini(int type){
rs_ini();
pn=0;
ntot=numBlock*threadPerBlock;
pmax=ntot*NPHO;
pmxo=pmax/OVER;
pmax=pmxo*OVER;
d.hnum=pmax/HQUO;
{
d.hits = q.hits = new hit[d.hnum];
if(type==0) d.pz = q.pz = new photon[pmxo];
#ifdef TALL
d.bf = new pbuf[pmax];
#endif
}
{
d.z=&z; e=&d; oms=q.oms;
}
{
unsigned int size=d.rsize, need=seed+1;
if(size<need) cerr<<"Error: not enough multipliers: asked for "<<seed<<"-th out of "<<size<<"!"<<endl;
}
}
void fin(){
    if(d.type==0) delete [] d.pz;  // buffers were allocated with new[], release with delete[]
    delete [] d.hits;
#ifdef TALL
    delete [] d.bf;
#endif
}
#else
bool xgpu=false;
void checkError(cudaError result){
if(result!=cudaSuccess){
cerr<<"CUDA Error: "<<cudaGetErrorString(result)<<endl;
exit(2);
}
}
struct gpu{
dats d;
dats *e; // pointer to a copy of "d" on device
int device;
int numBlock, threadPerBlock, ntot; // total threads in parallel
unsigned int npho, pmax, pmxo;
float dt, deviceTime, threadMin, threadMax;
cudaDeviceProp prop;
cudaStream_t stream;
cudaEvent_t evt1, evt2;
unsigned int old, num;
gpu(int device) : deviceTime(0), threadMin(0), threadMax(0), old(0), npho(NPHO){
this->device=device;
{
ostringstream o; o<<"NPHO_"<<device;
char * nph=getenv(o.str().c_str());
if(nph==NULL) nph=getenv("NPHO");
if(nph!=NULL) if(*nph!=0){
npho=atoi(nph);
cerr<<"Setting NPHO="<<npho<<endl;
if(npho<=0){
cerr<<"Not using device # "<<device<<"!"<<endl;
return;
}
}
}
checkError(cudaSetDevice(device));
checkError(cudaGetDeviceProperties(&prop, device));
#if CUDART_VERSION >= 3000
checkError(cudaFuncSetCacheConfig(propagate, cudaFuncCachePreferL1));
#endif
cudaFuncAttributes attr;
checkError(cudaFuncGetAttributes (&attr, propagate));
numBlock = prop.multiProcessorCount;
threadPerBlock = attr.maxThreadsPerBlock;
// threadPerBlock = 512;
// Change copy process?
// numBlock = prop.multiProcessorCount * 2;
// threadPerBlock = 256;
cerr << "Running on " << numBlock << " blocks x " << threadPerBlock << " threads" << endl;
fprintf(stderr, "Kernel uses: l=%lu r=%d s=%lu c=%lu\n", (unsigned long)attr.localSizeBytes,
attr.numRegs, (unsigned long)attr.sharedSizeBytes, (unsigned long)attr.constSizeBytes);
}
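  // per-device setup: size the photon buffers to fit in global memory, create the stream and timing events, and upload constants, geometry and the device copy of "d"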
void ini(int type){
// init the random number generator
rs_ini();
d = xppc::d;
// Check BADMP
{
d.blockIdx = -1, d.gridDim=numBlock;
ostringstream o; o<<"BADMP_"<<device;
char * bmp=getenv(o.str().c_str());
if(bmp==NULL) bmp=getenv("BADMP");
if(bmp!=NULL) if(*bmp!=0){
d.blockIdx=atoi(bmp), d.gridDim--;
cerr<<"Not using MP #"<<d.blockIdx<<endl;
}
}
ntot = numBlock * threadPerBlock;
{
unsigned long xmem = prop.totalGlobalMem;
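      // halve the photons-per-thread budget (npho) until the estimated device buffers fit into global memory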
while(npho>1){
pmax = ntot * npho;
pmxo = pmax / OVER;
pmax = pmxo*OVER;
        d.hnum = pmax/HQUO; // save at most pmax/HQUO photon hits per launch
        unsigned long mtot = sizeof(datz) + sizeof(dats) + d.gsize * sizeof(DOM);
        mtot += d.hnum*sizeof(hit);
if(d.type==0) mtot += pmxo*sizeof(photon);
#ifdef TALL
mtot += pmax*sizeof(pbuf);
#endif
if(mtot > xmem) npho/=2; else break;
}
}
{
checkError(cudaStreamCreate(&stream));
checkError(cudaEventCreateWithFlags(&evt1, cudaEventBlockingSync));
checkError(cudaEventCreateWithFlags(&evt2, cudaEventBlockingSync));
}
{
unsigned int size=d.rsize;
if(size<ntot) cerr<<"Error: not enough multipliers: only have "<<size<<" (need "<<ntot<<")!"<<endl;
else d.rsize=ntot;
}
unsigned long tot=0, cnt=0;
{
unsigned long size=sizeof(datz); tot+=size;
checkError(cudaMalloc((void**) &d.z, size));
checkError(cudaMemcpy(d.z, &z, size, cudaMemcpyHostToDevice));
}
{
unsigned long size=d.hnum*sizeof(hit); tot+=size;
checkError(cudaMalloc((void**) &d.hits, size));
}
if(d.type==0){
unsigned long size=pmxo*sizeof(photon); tot+=size;
checkError(cudaMalloc((void**) &d.pz, size));
}
#ifdef TALL
{
unsigned long size=pmax*sizeof(pbuf); tot+=size;
checkError(cudaMalloc((void**) &d.bf, size));
}
#endif
{
unsigned long size=d.gsize*sizeof(DOM); cnt+=size;
checkError(cudaMemcpyToSymbol(oms, q.oms, size));
}
{
unsigned long size=sizeof(dats); tot+=size;
checkError(cudaMalloc((void**) &e, size));
checkError(cudaMemcpy(e, &d, size, cudaMemcpyHostToDevice));
}
cerr << "Total GPU memory usage: "<< tot << " const: " << cnt << " (npho="<<npho<<")"<<endl;
}
void fin(){
checkError(cudaFree(d.z));
checkError(cudaFree(d.hits));
if(d.type==0) checkError(cudaFree(d.pz));
#ifdef TALL
checkError(cudaFree(d.bf));
#endif
checkError(cudaFree(e));
checkError(cudaEventDestroy(evt1));
checkError(cudaEventDestroy(evt2));
checkError(cudaStreamDestroy(stream));
// closeFile();
}
void set(){
if(xgpu) checkError(cudaSetDevice(device));
}
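  // harvest the previous launch: copy the counters back, check for errors and overflow, accumulate in-kernel timing, and copy the produced hits to the host asynchronously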
void kernel_i(){
{
checkError(cudaStreamSynchronize(stream));
checkError(cudaMemcpy(&d, e, 7*sizeof(int), cudaMemcpyDeviceToHost));
checkError(cudaEventElapsedTime(&dt, evt1, evt2)); deviceTime+=dt;
if(d.ab>0){
cerr<<"Error: TOT was a nan or an inf "<<d.ab<<" times! Bad GPU "<<device<<" MP";
for(int i=0; i<min(d.ab, 4); i++) cerr<<" #"<<d.bmp[i]; cerr<<endl;
}
if(d.mp!=d.gridDim){ cerr<<"Error: did not encounter MP #"<<d.blockIdx<<endl; exit(4); }
if(threadMax!=-1){
if((unsigned long long)(dt*prop.clockRate)<0x100000000ULL){
threadMin+=d.tn/(float)prop.clockRate;
threadMax+=d.tx/(float)prop.clockRate;
}
else threadMin=-1, threadMax=-1;
}
if(d.hidx>=d.hnum){ d.hidx=d.hnum; cerr<<"Error: data buffer overflow occurred!"<<endl; }
}
{
unsigned int size=d.hidx*sizeof(hit);
checkError(cudaMemcpyAsync(&q.hits[xppc::d.hidx], d.hits, size, cudaMemcpyDeviceToHost, stream));
xppc::d.hidx+=d.hidx;
}
}
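  // asynchronously stage this device's share of photon sources for the next launch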
void kernel_c(unsigned int & idx){
if(old>0) checkError(cudaStreamSynchronize(stream));
unsigned int pn=num/OVER;
unsigned int size=pn*sizeof(photon);
checkError(cudaMemcpyAsync(d.pz, &q.pz[idx], size, cudaMemcpyHostToDevice, stream));
idx+=pn;
}
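  // record evt1, run a single-thread propagate(e, 0) pass (per-launch setup on the device, presumably), then launch the full grid and record evt2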
void kernel_f(){
checkError(cudaStreamSynchronize(stream));
if(num>0){
checkError(cudaEventRecord(evt1, stream));
propagate<<< 1, 1, 0, stream >>>(e, 0);
checkError(cudaGetLastError());
// propagate<<< numBlock, threadPerBlock, 0, stream >>>(e, num);
propagate<<< numBlock, threadPerBlock, 0, stream >>>(e, num);
checkError(cudaGetLastError());
checkError(cudaEventRecord(evt2, stream));
// checkError(cudaEventSynchronize(evt2));
}
}
void stop(){
fprintf(stderr, "Device time: %2.1f (in-kernel: %2.1f...%2.1f) [ms]\n", deviceTime, threadMin, threadMax);
checkError(cudaThreadExit());
}
};
vector<gpu> gpus;
void ini(int type){
  // initialize every selected GPU and size the combined pinned host buffers for hits and photons
d.hnum=0;
pmax=0, pmxo=0, pn=0;
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++){
i->set();
i->ini(type); if(xgpu) sv++;
d.hnum+=i->d.hnum;
pmax+=i->pmax, pmxo+=i->pmxo;
}
{
unsigned long size=d.hnum*sizeof(hit);
checkError(cudaMallocHost((void**) &q.hits, size));
}
if(d.type==0){
unsigned long size=pmxo*sizeof(photon);
checkError(cudaMallocHost((void**) &q.pz, size));
}
}
void fin(){
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) i->set(), i->fin();
checkError(cudaFreeHost(q.hits));
if(d.type==0) checkError(cudaFreeHost(q.pz));
}
void listDevices(){
int deviceCount, driver, runtime;
cudaGetDeviceCount(&deviceCount);
cudaDriverGetVersion(&driver);
cudaRuntimeGetVersion(&runtime);
fprintf(stderr, "Found %d devices, driver %d, runtime %d\n", deviceCount, driver, runtime);
for(int device=0; device<deviceCount; ++device){
cudaDeviceProp prop; cudaGetDeviceProperties(&prop, device);
fprintf(stderr, "%d(%d.%d): %s %g GHz G(%lu) S(%lu) C(%lu) R(%d) W(%d)\n"
"\tl%d o%d c%d h%d i%d m%d a%lu M(%lu) T(%d: %d,%d,%d) G(%d,%d,%d)\n",
device, prop.major, prop.minor, prop.name, prop.clockRate/1.e6,
(unsigned long)prop.totalGlobalMem, (unsigned long)prop.sharedMemPerBlock,
(unsigned long)prop.totalConstMem, prop.regsPerBlock, prop.warpSize,
prop.kernelExecTimeoutEnabled, prop.deviceOverlap, prop.computeMode,
prop.canMapHostMemory, prop.integrated, prop.multiProcessorCount,
(unsigned long)prop.textureAlignment,
(unsigned long)prop.memPitch, prop.maxThreadsPerBlock,
prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2],
prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
}
fprintf(stderr, "\n");
}
static unsigned int old=0;
#endif
void print();
void closeFile();
void setNameWithGeometry(int x, int y, int z);
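// per-batch driver: collect results of the previous launch from every GPU, apportion the new photon count across devices, stage the inputs and launch asynchronously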
void kernel(unsigned int num){
#ifdef XCPU
unsigned int & old = num;
#endif
if(old>0){
d.hidx=0;
#ifdef XCPU
for(d.blockIdx=0, d.gridDim=numBlock, blockDim.x=threadPerBlock; d.blockIdx<d.gridDim; d.blockIdx++)
for(threadIdx.x=0; threadIdx.x<blockDim.x; threadIdx.x++) propagate(e, num);
if(d.hidx>=d.hnum){ d.hidx=d.hnum; cerr<<"Error: data buffer overflow occurred!"<<endl; }
#else
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) {
i->set();
i->kernel_i();
}
#endif
#ifndef CAMERA
cerr<<"photons: "<<old<<" hits: "<<d.hidx<<endl;
#endif
}
#ifndef XCPU
{
unsigned int over=d.type == 0 ? OVER : 1, sum=0;
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++){
i->num=over*((num*(unsigned long long) i->pmax)/(over*(unsigned long long) pmax));
sum+=i->num;
}
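      // round-robin any remainder so the full requested photon count is assigned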
while(num>sum){
static int res=0;
gpu& g=gpus[res++%gpus.size()];
if(g.num<g.pmax) g.num+=over, sum+=over;
}
}
if(d.type==0){
unsigned int idx=0;
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) i->set(), i->kernel_c(idx);
}
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) i->set(), i->kernel_f();
#endif
if(old>0) print();
#ifndef XCPU
old=num;
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) i->old=i->num;
#endif
}
float square(float x){
return x*x;
}
  /*
    Initialize the flasher position and beam width (FWID).
  */
int flset(int str, int dom){
int type=1;
float r[3]={0, 0, 0};
if(str<0){ type=2; str=-str; }
if(str==0) switch(dom){
case 1: type=3; r[0]=544.07; r[1]=55.89; r[2]=136.86; break;
case 2: type=4; r[0]=11.87; r[1]=179.19; r[2]=-205.64; break;
}
else {
for(int n=0; n<d.gsize; n++) {
if(q.names[n].str==str && q.names[n].dom==dom){
d.fla=n;
for(int m=0; m<3; m++) r[m]=q.oms[n].r[m]; break;
}
}
}
for(int m=0; m<3; m++)
d.r[m]=r[m];
float fwid=9.7f;
{
char * FWID=getenv("FWID");
if(FWID!=NULL){ fwid=atof(FWID);
cerr<<"Setting flasher beam width to "<<fwid<<" degrees"<<endl;
}
}
if(fwid<0) d.ka=-1, d.up=0; else
switch(type){
case 1: d.ka=square(fcv*fwid); d.up=fcv*0.0f; break;
case 2: d.ka=square(fcv*fwid); d.up=fcv*48.f; break;
case 3: d.ka=0.0f; d.up=fcv*(90.0f-41.13f); break;
case 4: d.ka=0.0f; d.up=fcv*(41.13f-90.0f); break;
}
return type;
}
void flini(int str, int dom){
d.type = flset(str, dom);
ini(d.type);
}
#ifdef XLIB
const DOM& flget(int str, int dom){
static DOM om;
flset(str, dom); ini(0);
for(int m=0; m<3; m++) om.r[m]=d.r[m];
return om;
}
void flshift(float r[], float n[], float *m = NULL){
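// When the beam is not a pencil beam (d.ka>0), offsets the emission point from the
// DOM center toward the LED and advances the start time r[3] by OMR*d.ocv; the photon
// direction is then tilted by the LED inclination d.up. If an orientation m is given,
// both the direction n and the positional offset are rotated by the rotation relating
// the +z axis to m.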
float sft[3]={0};
if(d.ka>0){
float FLZ, FLR;
sincosf(fcv*30.f, &FLZ, &FLR);
FLZ*=OMR, FLR*=OMR;
sft[0]+=FLR*n[0];
sft[1]+=FLR*n[1];
sft[2]+=FLZ;
r[3]+=OMR*d.ocv;
}
float xi;
sincosf(d.up, &n[2], &xi);
n[0]*=xi; n[1]*=xi;
if(m!=NULL){
float o[3]={0,0,1};
float r[3];
r[0]=m[1]*o[2]-m[2]*o[1]; // m[1]
r[1]=m[2]*o[0]-m[0]*o[2]; //-m[0]
r[2]=m[0]*o[1]-m[1]*o[0]; // 0
float norm=sqrt(r[0]*r[0]+r[1]*r[1]+r[2]*r[2]);
if(norm>0){
float cs=0;
for(int i=0; i<3; i++) r[i]/=norm, cs+=o[i]*m[i];
float sn=sin(acos(cs)); //norm
float R[3][3]={0};
for(int i=0; i<3; i++)
for(int j=0; j<3; j++)
R[i][j]=(i==j?cs:sn*r[3-i-j]*((j-i+3)%3==1?1:-1))+(1-cs)*r[i]*r[j];
float tmp[3];
for(int i=0; i<3; i++){
tmp[i]=0;
for(int j=0; j<3; j++) tmp[i]+=R[i][j]*n[j];
}
for(int i=0; i<3; i++) n[i]=tmp[i];
for(int i=0; i<3; i++){
tmp[i]=0;
for(int j=0; j<3; j++) tmp[i]+=R[i][j]*sft[j];
}
for(int i=0; i<3; i++) sft[i]=tmp[i];
}
}
for(int i=0; i<3; i++) r[i]+=sft[i];
}
#endif
void flone(unsigned long long num){
for(long long i=llroundf(num*(long double)d.eff); i>0; i-=pmax) kernel(min(i, (long long) pmax));
#ifndef XCPU
kernel(0);
#endif
}
void flasher(int str, int dom, unsigned long long num, int itr){
flini(str, dom);
#ifdef CAMERA
setNameWithGeometry(x_diff, y_diff, z_diff);
#endif
for(int j=0; j<max(1, itr); j++){
iterNum = j;
flone(num);
// if(itr>0) printf("\n");
closeFile();
}
fin();
#ifdef CAMERA
#endif
}
void setOrder(int order){
photonOrder = order;
}
#ifdef XCPU
void start(){}
void stop(){}
void choose(int device){
sv+=device;
seed=device;
numBlock=numBlock, threadPerBlock=threadPerBlock;
}
void listDevices(){}
#else
/*
Before initializing CUDA, set the device flag so the host blocks (rather than busy-waits) while synchronizing with the GPU
*/
void start(){
cudaSetDeviceFlags(cudaDeviceBlockingSync);
}
void stop(){
fprintf(stderr, "\n");
for(vector<gpu>::iterator i=gpus.begin(); i!=gpus.end(); i++) i->set(), i->stop();
}
void choose(int device){
if(device<0){
int deviceCount; cudaGetDeviceCount(&deviceCount);
for(int device=0; device<deviceCount; ++device){
gpus.push_back(gpu(device));
if(gpus.back().npho<=0) gpus.pop_back();
}
}
else{
sv+=device;
gpus.push_back(gpu(device));
if(gpus.back().npho<=0) gpus.pop_back();
}
if(gpus.size()<=0){
cerr<<"No active GPU(s) selected!"<<endl;
exit(5);
}
xgpu=gpus.size()>1;
}
#endif
#include "f2k.cxx"
}
#ifndef XLIB
using namespace xppc;
int main(int arg_c, char *arg_a[]){
start();
if(arg_c<=1){
listDevices();
fprintf(stderr, "Use: %s [device] (f2k muons)\n"
" %s [str] [om] [num] [device] (flasher)\n", arg_a[0], arg_a[0]);
}
else if(0==strcmp(arg_a[1], "-")){
initialize();
ices & w = z.w[WNUM/2];
cerr<<"For wavelength="<<w.wvl<<" [nm] np="<<(1/w.coschr)<<" cm="<<1/w.ocm<<" [m/ns]"<<endl;
#ifdef TILT
float4 r;
r.w=0;
if(arg_c==4){
r.x=atof(arg_a[2]);
r.y=atof(arg_a[3]);
}
else r.x=0, r.y=0;
#endif
for(int i=0; i<d.size; i++){
float z=d.hmin+d.dh*i;
#ifdef TILT
r.z=z; for(int j=0; j<10; j++) r.z=z+zshift(d, r); z=r.z;
#endif
cout<<z<<" "<<w.z[i].abs<<" "<<w.z[i].sca*(1-d.g)<<endl;
}
}
else if(arg_c<=2){
int device=0;
if(arg_c>1) device=atoi(arg_a[1]);
initialize();
choose(device);
fprintf(stderr, "Processing f2k muons from stdin on device %d\n", device);
f2k();
}
// Main operation for flasher
else{
int ledStr=0, ledDom=0, device=0, itr=0;
unsigned long long num=1000000ULL;
if(arg_c>1) ledStr=atoi(arg_a[1]);
if(arg_c>2) ledDom=atoi(arg_a[2]);
if(arg_c>3){
num=(unsigned long long) atof(arg_a[3]);
char * sub = strchr(arg_a[3], '*');
if(sub!=NULL) itr=(int) atof(++sub);
}
if(arg_c>4) device=atoi(arg_a[4]);
int order = log10(num);
// cout << order << endl;
initialize();
// listDevices();
setOrder(order);
choose(device);
fprintf(stderr, "Running flasher simulation on device %d\n", device);
flasher(ledStr, ledDom, num, itr);
}
stop();
}
#endif
|
077e73ac956e7e48d87c4c4e39d16e640ede5443.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "timerc.h"
#define SIZE 1024*1024*16
#define gerror(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*use pattern to compare starting with every possible position*/
__global__ void brute_force(char *text, char *pattern, int *match, int pattern_size, int text_size){
/*get the absolute pid*/
int pid = threadIdx.x + blockIdx.x*blockDim.x;
if (pid <= text_size - pattern_size){
int flag = 1;
for (int i = 0; i < pattern_size; i++){
if (text[pid+i] != pattern[i]){
flag = 0;
}
}
match[pid] = flag;
}
}
__global__ void brute_force_refine(char *text, char *pattern, int *index, int *match, int number_of_blocks, int blocksize, int pattern_size){
int id = threadIdx.x + blockDim.x*blockIdx.x;
if (id < number_of_blocks){
int starting_pos = index[id*blocksize];
int flag = 1;
for (int i=0; i<pattern_size; i++){
if (text[starting_pos+i] != pattern[i]){
flag = 0;
break;
}
}
match[starting_pos] = flag;
}
}
__global__ void brute_force_refine_blockoutput(char *text, char *pattern, int *blockoutput, int *match, int number_of_blocks, int blocksize, int pattern_size){
int id = threadIdx.x + blockDim.x*blockIdx.x;
if (id < number_of_blocks){
int flag = 1;
int starting_pos = blockoutput[id];
for (int i=0; i<pattern_size; i++){
if (text[starting_pos+i] != pattern[i]){
flag = 0;
}
}
match[starting_pos] = flag;
}
}
__global__ void nonperiodic_version_binary_tree_shared_memory(char *text, char *pattern, int *blockoutput, int *witness_array, int blocksize){
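// Block-wide tournament of "duels": every thread starts with its own text position as
// a candidate occurrence. In each round a pair of surviving candidates i < j is compared
// via k = witness_array[j-i]; if text[j+k] != pattern[k] the later candidate j is
// eliminated, otherwise the earlier candidate i is. Thread 0 writes the block's single
// surviving candidate to blockoutput[blockIdx.x]; it is verified later by brute force.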
int pid = threadIdx.x;
int id = threadIdx.x + blockDim.x*blockIdx.x;
//int size = blocksize;
int s = (blocksize + 2 - 1) / 2;
int size = blocksize;
__shared__ int buffer[1024];
buffer[pid] = id;
__syncthreads();
while(s >= 2){
if (threadIdx.x <= size/2){
int starting_pos = pid;
if (starting_pos + s >= size){
buffer[pid] = id;
}else{
int j = buffer[starting_pos+s];
int i = buffer[starting_pos];
int k = witness_array[j-i];
if (text[j+k] != pattern[k]){
buffer[starting_pos] = i;
}else{
buffer[starting_pos] = j;
}
}
}
__syncthreads();
s = (s+2-1)/2;
size = (size + 2 - 1) / 2;
}
__syncthreads();
if (threadIdx.x ==0){
int j = buffer[1];
int i = buffer[0];
int k = witness_array[j-i];
if (text[j + k ] != pattern[k]){
blockoutput[blockIdx.x] = i;
}else{
blockoutput[blockIdx.x] = j;
}
}
}
__global__ void nonperiodic_version_binary_tree(char *text, char *pattern, int *output, int *witness_array, int blocksize){
//get index
int id = threadIdx.x + blockDim.x*blockIdx.x;
//use for looping
int size = blocksize;
int s = (blocksize + 2 - 1) / 2;
//starting pos for the thread
//create a dynamically large shared memory
//read index to buffer
output[id] = id;
__syncthreads();
while(s>=2){
int starting_pos = id;
if (threadIdx.x<size/2){
if (starting_pos + s <size){
int j = output[starting_pos+s];
int i = output[starting_pos];
int k = witness_array[j-i];
if (text[j + k] == pattern[k]){
output[starting_pos] = j;
}
}
}
__syncthreads();
s = (s+2-1)/2;
size = (size + 2 - 1) / 2;
}
if (threadIdx.x ==0){
int starting_pos = id;
int j = output[starting_pos+s];
int i = output[starting_pos];
int k = witness_array[j-i];
if (text[j + k] == pattern[k]){
output[starting_pos] = output[starting_pos+s];
}
}
// output[blockIdx.x] = buffer[blockIdx.x * blockDim.x];
}
int cap_division(int x, int y){
return (x + y - 1) / y;
}
/*CPU version of witness array calculation*/
void witness_array_cpu(char *pattern, int *witness_array, int pattern_size){
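// witness_array[i] (for 0 < i < ceil(m/2)) records a position j with
// pattern[j] != pattern[i+j], i.e. a "witness" that the pattern does not match a copy
// of itself shifted by i; the GPU duel kernels use it to eliminate one of any two
// candidate positions that are less than ceil(m/2) apart.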
if (pattern_size >2){
witness_array[0] = 0;
for (int i = 1; i<cap_division(pattern_size, 2); i++){
for (int j=0; j<cap_division(pattern_size, 2); j++){
if (pattern[j] != pattern[i+j]){
witness_array[i] = j;
break;
}
}
}
}else{
witness_array[0] = 0;
}
}
void failure_function_cpu(char *pattern, int *failure_function, int pattern_size){
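// KMP prefix ("failure") function: failure_function[k] is the length of the longest
// proper prefix of pattern[0..k] that is also a suffix of it.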
failure_function[0] = 0;
int k = 1;
int j = 0;
while ( k < pattern_size){
if (pattern[k] == pattern[j]){
j ++;
failure_function[k] = j;
k ++;
}else{
if (j !=0){
j = failure_function[j-1];
}else{
failure_function[k] =0;
k++;
}
}
}
}
void serial_string_matching_KMP(char *text, char *pattern, int pattern_size, int text_size, int *failure_function){
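// Classic KMP scan: on a mismatch the pattern index falls back via the failure
// function instead of rewinding the text, so the text is traversed only once.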
//index for text
int i = 0;
//index for pattern
int j = 0;
while (i < text_size){
//if matching increment both index
if (pattern[j] == text[i]){
j++;
i++;
}
//if match the pattern print message
if (j == pattern_size){
//printf("found at index %d \n", i-j);
j = failure_function[j-1];
}
else if ( i < text_size && pattern[j] != text[i]){
if (j != 0){
j = failure_function[j-1];
}else{
i+=1;
}
}
}
}
int main(){
/*initialization;
open file
read file char by char and store in heap
*/
FILE *fp;
FILE *fp2;
char ch;
fp = fopen("test.txt", "r");
fp2 = fopen("pattern.txt", "r");
char * text = (char *) malloc (SIZE*sizeof(char)); //size text buffer for text
char * pattern = (char *) malloc (SIZE*sizeof(char));
int * match; //size text buffer for match array
int size = 0;
int pattern_size = 0;
//int blocksize = 32;
//initialize timing variables
float cpuTime;
float gpuTime0;
float gpuTime1;
float gpuTime2;
float gpuTime3;
float cpuTime1;
//read text to buffer
while ((ch = getc(fp)) != EOF){
text[size] = ch;
//match[size] = 0;
size ++;
if (size>=SIZE) break;
}
while ((ch =getc(fp2))!=EOF){
pattern[pattern_size] = ch;
pattern_size++;
}
size --;
pattern_size--;
printf("size %d \n", size);
printf("pattern size %d \n", pattern_size);
int *output = (int *) malloc (sizeof(int)*size);
/*initialize match array*/
match = (int *) malloc (size*sizeof(int));
for (int i = 0; i < size; i++){
match[i] = -1;
}
/*malloc witness array*/
int *witness_array = (int *)malloc(sizeof(int)*cap_division(pattern_size, 2));
witness_array_cpu(pattern, witness_array, pattern_size);
cstart();
int *failure_function = (int *)malloc(sizeof(int)*(pattern_size));
failure_function_cpu(pattern, failure_function, pattern_size);
cend(&cpuTime);
cstart();
serial_string_matching_KMP(text, pattern, pattern_size, size, failure_function);
cend(&cpuTime1);
printf("CPU prepare time: %f", cpuTime);
printf("KMP time: %f", cpuTime1);
/* GPU init*/
//text buffer in device
char *dev_text;
//pattern buffer in device
char *dev_pattern;
// match buffer in device
int *dev_match;
//output buffer in device
int *dev_output;
//witness array
int *dev_witness;
//block output
int *dev_blockoutput;
//config block and thread size
//int number_of_threads = 32
int number_of_threads = cap_division(pattern_size, 2);
int number_of_blocks = (size + number_of_threads - 1) / number_of_threads;
int *blockoutput = (int *) malloc(number_of_blocks*sizeof(int));
gstart();
hipMalloc((void **)&dev_text, size*sizeof(char));
hipMalloc((void **)&dev_pattern, pattern_size*sizeof(char));
hipMalloc((void **)&dev_match, size*sizeof(int));
//hipMalloc((void **)&dev_output, sizeof(int)*size);
hipMalloc((void **)&dev_blockoutput, sizeof(int)*number_of_blocks);
hipMalloc((void **)&dev_witness, sizeof(int)*cap_division(pattern_size, 2));
hipMemcpy(dev_text, text, size*sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(dev_pattern, pattern, pattern_size*sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(dev_witness, witness_array, cap_division(pattern_size, 2)*sizeof(int), hipMemcpyHostToDevice);
gend(&gpuTime0);
gstart();
//brute_force<<<(size + 1024 - 1) / 1024, 1024>>>(dev_text, dev_pattern, dev_match, pattern_size, size);
//nonperiodic_version_binary_tree<<<number_of_blocks, number_of_threads>>> (dev_text, dev_pattern, dev_output, dev_witness, number_of_threads);
hipLaunchKernelGGL(( nonperiodic_version_binary_tree_shared_memory), dim3(number_of_blocks), dim3(number_of_threads), 0, 0, dev_text, dev_pattern, dev_blockoutput, dev_witness, number_of_threads);
hipDeviceSynchronize();
gend(&gpuTime1);
gstart();
//brute_force_refine<<<(size + 1024 - 1) / 1024, 1024>>>(dev_text, dev_pattern, dev_output, dev_match, number_of_blocks ,cap_division(pattern_size, 2), pattern_size);
hipLaunchKernelGGL(( brute_force_refine_blockoutput), dim3((size + 1024 - 1) / 1024), dim3(1024), 0, 0, dev_text, dev_pattern, dev_blockoutput, dev_match, number_of_blocks ,cap_division(pattern_size, 2), pattern_size);
gend(&gpuTime2);
gstart();
hipMemcpy(match, dev_match, size*sizeof(int), hipMemcpyDeviceToHost);
//hipMemcpy(blockoutput, dev_blockoutput, size*sizeof(int), hipMemcpyDeviceToHost);
gend(&gpuTime3);
/*
printf("<<<<result>>>> \n");
int flag =1;
for (int i = 0; i< size; i++){
printf("%d", match[i]);
}
/*
if (flag ==1){
printf("success");
}else{
printf ("error");
}
printf("\n");
printf("<<<<output>>>> \n");
for (int i = 0; i< number_of_blocks; i++){
printf("%d ", blockoutput[i]);
}
printf("\n");
*/
gerror( hipPeekAtLastError() );
hipDeviceSynchronize();
/*free memory*/
hipFree(dev_text);
hipFree(dev_pattern);
hipFree(dev_match);
hipFree(dev_output);
hipFree(dev_witness);
hipFree(dev_blockoutput);
free(text);
free(pattern);
free(match);
free(witness_array);
free(failure_function);
printf("CPUTIME: %f, GPUTIME0: %f, GPUTIME1: %f, GPUTIME2:%f, GPUTIME3:%f, TOTAL: %f", cpuTime,gpuTime0, gpuTime1, gpuTime2, gpuTime3, cpuTime+gpuTime1+gpuTime2 + gpuTime0+gpuTime3);
//printf("CPUTIME: %f, GPUTIME0: %f, GPUTIME1: %f, GPUTIME3:%f, TOTAL: %f", cpuTime,gpuTime0, gpuTime1, gpuTime3, cpuTime+gpuTime1+gpuTime0+gpuTime3);
}
|
077e73ac956e7e48d87c4c4e39d16e640ede5443.cu
|
#include <stdio.h>
#include <stdlib.h>
#include "timerc.h"
#define SIZE 1024*1024*16
#define gerror(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*use pattern to compare starting with every possible position*/
__global__ void brute_force(char *text, char *pattern, int *match, int pattern_size, int text_size){
/*get the absolute pid*/
int pid = threadIdx.x + blockIdx.x*blockDim.x;
if (pid <= text_size - pattern_size){
int flag = 1;
for (int i = 0; i < pattern_size; i++){
if (text[pid+i] != pattern[i]){
flag = 0;
}
}
match[pid] = flag;
}
}
__global__ void brute_force_refine(char *text, char *pattern, int *index, int *match, int number_of_blocks, int blocksize, int pattern_size){
int id = threadIdx.x + blockDim.x*blockIdx.x;
if (id < number_of_blocks){
int starting_pos = index[id*blocksize];
int flag = 1;
for (int i=0; i<pattern_size; i++){
if (text[starting_pos+i] != pattern[i]){
flag = 0;
break;
}
}
match[starting_pos] = flag;
}
}
__global__ void brute_force_refine_blockoutput(char *text, char *pattern, int *blockoutput, int *match, int number_of_blocks, int blocksize, int pattern_size){
int id = threadIdx.x + blockDim.x*blockIdx.x;
if (id < number_of_blocks){
int flag = 1;
int starting_pos = blockoutput[id];
for (int i=0; i<pattern_size; i++){
if (text[starting_pos+i] != pattern[i]){
flag = 0;
}
}
match[starting_pos] = flag;
}
}
__global__ void nonperiodic_version_binary_tree_shared_memory(char *text, char *pattern, int *blockoutput, int *witness_array, int blocksize){
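// Block-wide tournament of "duels": every thread starts with its own text position as
// a candidate occurrence. In each round a pair of surviving candidates i < j is compared
// via k = witness_array[j-i]; if text[j+k] != pattern[k] the later candidate j is
// eliminated, otherwise the earlier candidate i is. Thread 0 writes the block's single
// surviving candidate to blockoutput[blockIdx.x]; it is verified later by brute force.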
int pid = threadIdx.x;
int id = threadIdx.x + blockDim.x*blockIdx.x;
//int size = blocksize;
int s = (blocksize + 2 - 1) / 2;
int size = blocksize;
__shared__ int buffer[1024];
buffer[pid] = id;
__syncthreads();
while(s >= 2){
if (threadIdx.x <= size/2){
int starting_pos = pid;
if (starting_pos + s >= size){
buffer[pid] = id;
}else{
int j = buffer[starting_pos+s];
int i = buffer[starting_pos];
int k = witness_array[j-i];
if (text[j+k] != pattern[k]){
buffer[starting_pos] = i;
}else{
buffer[starting_pos] = j;
}
}
}
__syncthreads();
s = (s+2-1)/2;
size = (size + 2 - 1) / 2;
}
__syncthreads();
if (threadIdx.x ==0){
int j = buffer[1];
int i = buffer[0];
int k = witness_array[j-i];
if (text[j + k ] != pattern[k]){
blockoutput[blockIdx.x] = i;
}else{
blockoutput[blockIdx.x] = j;
}
}
}
__global__ void nonperiodic_version_binary_tree(char *text, char *pattern, int *output, int *witness_array, int blocksize){
//get index
int id = threadIdx.x + blockDim.x*blockIdx.x;
//use for looping
int size = blocksize;
int s = (blocksize + 2 - 1) / 2;
//starting pos for the thread
//create a dynamically large shared memory
//read index to buffer
output[id] = id;
__syncthreads();
while(s>=2){
int starting_pos = id;
if (threadIdx.x<size/2){
if (starting_pos + s <size){
int j = output[starting_pos+s];
int i = output[starting_pos];
int k = witness_array[j-i];
if (text[j + k] == pattern[k]){
output[starting_pos] = j;
}
}
}
__syncthreads();
s = (s+2-1)/2;
size = (size + 2 - 1) / 2;
}
if (threadIdx.x ==0){
int starting_pos = id;
int j = output[starting_pos+s];
int i = output[starting_pos];
int k = witness_array[j-i];
if (text[j + k] == pattern[k]){
output[starting_pos] = output[starting_pos+s];
}
}
// output[blockIdx.x] = buffer[blockIdx.x * blockDim.x];
}
int cap_division(int x, int y){
return (x + y - 1) / y;
}
/*CPU version of witness array calculation*/
void witness_array_cpu(char *pattern, int *witness_array, int pattern_size){
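// witness_array[i] (for 0 < i < ceil(m/2)) records a position j with
// pattern[j] != pattern[i+j], i.e. a "witness" that the pattern does not match a copy
// of itself shifted by i; the GPU duel kernels use it to eliminate one of any two
// candidate positions that are less than ceil(m/2) apart.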
if (pattern_size >2){
witness_array[0] = 0;
for (int i = 1; i<cap_division(pattern_size, 2); i++){
for (int j=0; j<cap_division(pattern_size, 2); j++){
if (pattern[j] != pattern[i+j]){
witness_array[i] = j;
break;
}
}
}
}else{
witness_array[0] = 0;
}
}
void failure_function_cpu(char *pattern, int *failure_function, int pattern_size){
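// KMP prefix ("failure") function: failure_function[k] is the length of the longest
// proper prefix of pattern[0..k] that is also a suffix of it.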
failure_function[0] = 0;
int k = 1;
int j = 0;
while ( k < pattern_size){
if (pattern[k] == pattern[j]){
j ++;
failure_function[k] = j;
k ++;
}else{
if (j !=0){
j = failure_function[j-1];
}else{
failure_function[k] =0;
k++;
}
}
}
}
void serial_string_matching_KMP(char *text, char *pattern, int pattern_size, int text_size, int *failure_function){
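// Classic KMP scan: on a mismatch the pattern index falls back via the failure
// function instead of rewinding the text, so the text is traversed only once.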
//index for text
int i = 0;
//index for pattern
int j = 0;
while (i < text_size){
//if matching increment both index
if (pattern[j] == text[i]){
j++;
i++;
}
//if match the pattern print message
if (j == pattern_size){
//printf("found at index %d \n", i-j);
j = failure_function[j-1];
}
else if ( i < text_size && pattern[j] != text[i]){
if (j != 0){
j = failure_function[j-1];
}else{
i+=1;
}
}
}
}
int main(){
/*initialization;
open file
read file char by char and store in heap
*/
FILE *fp;
FILE *fp2;
char ch;
fp = fopen("test.txt", "r");
fp2 = fopen("pattern.txt", "r");
char * text = (char *) malloc (SIZE*sizeof(char)); //size text buffer for text
char * pattern = (char *) malloc (SIZE*sizeof(char));
int * match; //size text buffer for match array
int size = 0;
int pattern_size = 0;
//int blocksize = 32;
//initialize timing variables
float cpuTime;
float gpuTime0;
float gpuTime1;
float gpuTime2;
float gpuTime3;
float cpuTime1;
//read text to buffer
while ((ch = getc(fp)) != EOF){
text[size] = ch;
//match[size] = 0;
size ++;
if (size>=SIZE) break;
}
while ((ch =getc(fp2))!=EOF){
pattern[pattern_size] = ch;
pattern_size++;
}
size --;
pattern_size--;
printf("size %d \n", size);
printf("pattern size %d \n", pattern_size);
int *output = (int *) malloc (sizeof(int)*size);
/*initialize match array*/
match = (int *) malloc (size*sizeof(int));
for (int i = 0; i < size; i++){
match[i] = -1;
}
/*malloc witness array*/
int *witness_array = (int *)malloc(sizeof(int)*cap_division(pattern_size, 2));
witness_array_cpu(pattern, witness_array, pattern_size);
cstart();
int *failure_function = (int *)malloc(sizeof(int)*(pattern_size));
failure_function_cpu(pattern, failure_function, pattern_size);
cend(&cpuTime);
cstart();
serial_string_matching_KMP(text, pattern, pattern_size, size, failure_function);
cend(&cpuTime1);
printf("CPU prepare time: %f", cpuTime);
printf("KMP time: %f", cpuTime1);
/* GPU init*/
//text buffer in device
char *dev_text;
//pattern buffer in device
char *dev_pattern;
// match buffer in device
int *dev_match;
//output buffer in device
int *dev_output;
//witness array
int *dev_witness;
//block output
int *dev_blockoutput;
//config block and thread size
//int number_of_threads = 32
int number_of_threads = cap_division(pattern_size, 2);
int number_of_blocks = (size + number_of_threads - 1) / number_of_threads;
int *blockoutput = (int *) malloc(number_of_blocks*sizeof(int));
gstart();
cudaMalloc((void **)&dev_text, size*sizeof(char));
cudaMalloc((void **)&dev_pattern, pattern_size*sizeof(char));
cudaMalloc((void **)&dev_match, size*sizeof(int));
//cudaMalloc((void **)&dev_output, sizeof(int)*size);
cudaMalloc((void **)&dev_blockoutput, sizeof(int)*number_of_blocks);
cudaMalloc((void **)&dev_witness, sizeof(int)*cap_division(pattern_size, 2));
cudaMemcpy(dev_text, text, size*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(dev_pattern, pattern, pattern_size*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(dev_witness, witness_array, cap_division(pattern_size, 2)*sizeof(int), cudaMemcpyHostToDevice);
gend(&gpuTime0);
gstart();
//brute_force<<<(size + 1024 - 1) / 1024, 1024>>>(dev_text, dev_pattern, dev_match, pattern_size, size);
//nonperiodic_version_binary_tree<<<number_of_blocks, number_of_threads>>> (dev_text, dev_pattern, dev_output, dev_witness, number_of_threads);
nonperiodic_version_binary_tree_shared_memory<<<number_of_blocks, number_of_threads>>> (dev_text, dev_pattern, dev_blockoutput, dev_witness, number_of_threads);
cudaDeviceSynchronize();
gend(&gpuTime1);
gstart();
//brute_force_refine<<<(size + 1024 - 1) / 1024, 1024>>>(dev_text, dev_pattern, dev_output, dev_match, number_of_blocks ,cap_division(pattern_size, 2), pattern_size);
brute_force_refine_blockoutput<<<(size + 1024 - 1) / 1024, 1024>>>(dev_text, dev_pattern, dev_blockoutput, dev_match, number_of_blocks ,cap_division(pattern_size, 2), pattern_size);
gend(&gpuTime2);
gstart();
cudaMemcpy(match, dev_match, size*sizeof(int), cudaMemcpyDeviceToHost);
//cudaMemcpy(blockoutput, dev_blockoutput, size*sizeof(int), cudaMemcpyDeviceToHost);
gend(&gpuTime3);
/*
printf("<<<<result>>>> \n");
int flag =1;
for (int i = 0; i< size; i++){
printf("%d", match[i]);
}
/*
if (flag ==1){
printf("success");
}else{
printf ("error");
}
printf("\n");
printf("<<<<output>>>> \n");
for (int i = 0; i< number_of_blocks; i++){
printf("%d ", blockoutput[i]);
}
printf("\n");
*/
gerror( cudaPeekAtLastError() );
cudaDeviceSynchronize();
/*free memory*/
cudaFree(dev_text);
cudaFree(dev_pattern);
cudaFree(dev_match);
cudaFree(dev_output);
cudaFree(dev_witness);
cudaFree(dev_blockoutput);
free(text);
free(pattern);
free(match);
free(witness_array);
free(failure_function);
printf("CPUTIME: %f, GPUTIME0: %f, GPUTIME1: %f, GPUTIME2:%f, GPUTIME3:%f, TOTAL: %f", cpuTime,gpuTime0, gpuTime1, gpuTime2, gpuTime3, cpuTime+gpuTime1+gpuTime2 + gpuTime0+gpuTime3);
//printf("CPUTIME: %f, GPUTIME0: %f, GPUTIME1: %f, GPUTIME3:%f, TOTAL: %f", cpuTime,gpuTime0, gpuTime1, gpuTime3, cpuTime+gpuTime1+gpuTime0+gpuTime3);
}
|
fef514cbbd964838e9abd76a704fd932ad1e311c.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
/*! \file DLVODriverPotentialPairGPU.cu
\brief Defines the driver functions for computing all types of pair forces on the GPU
*/
#include "EvaluatorPairDLVO.h"
#include "AllDriverPotentialPairGPU.cuh"
hipError_t gpu_compute_dlvo_forces(const pair_args_t & args,
const Scalar3 *d_params)
{
return gpu_compute_pair_forces<EvaluatorPairDLVO>(args,
d_params);
}
|
fef514cbbd964838e9abd76a704fd932ad1e311c.cu
|
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
/*! \file DLVODriverPotentialPairGPU.cu
\brief Defines the driver functions for computing all types of pair forces on the GPU
*/
#include "EvaluatorPairDLVO.h"
#include "AllDriverPotentialPairGPU.cuh"
cudaError_t gpu_compute_dlvo_forces(const pair_args_t & args,
const Scalar3 *d_params)
{
return gpu_compute_pair_forces<EvaluatorPairDLVO>(args,
d_params);
}
|
78fa6d773e3ed8357347d5500ad578a9687c0eb4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_helper_funcs.h"
#include "RGB.h"
#include <iostream>
using namespace std;
// Block dimensions
#define X_BLOCK_SIZE 32
#define Y_BLOCK_SIZE 32
// Shared variables: fast memory for each block
__shared__ float red_share[X_BLOCK_SIZE][Y_BLOCK_SIZE], green_share[X_BLOCK_SIZE][Y_BLOCK_SIZE], blue_share[X_BLOCK_SIZE][Y_BLOCK_SIZE];
__global__ void accumulate_colors(RGB *d_pixels, int height, int width, float3 *total_colors)
{
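// Each in-range thread stages its pixel in shared memory; the block then tree-reduces
// along threadIdx.x for every row (fixed threadIdx.y), and lane 0 of each row
// atomically adds its row total into this block's entry of total_colors.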
int x = blockIdx.x * blockDim.x + threadIdx.x; // width index
int y = blockIdx.y * blockDim.y + threadIdx.y; // height index
if (y >= height || x >= width) // index is not within the image
return;
int index = y * width + x; // index into pixel buffer
int totalThreads = blockDim.x;
int me = threadIdx.x;
red_share[me][threadIdx.y] = d_pixels[index].red;
green_share[me][threadIdx.y] = d_pixels[index].green;
blue_share[me][threadIdx.y] = d_pixels[index].blue;
__syncthreads(); //language extension not actually a function
while (totalThreads > 1){
int buddy_line = totalThreads / 2;
if (me < buddy_line){
int buddy = me + buddy_line;
red_share[me][threadIdx.y] += red_share[buddy][threadIdx.y];
green_share[me][threadIdx.y] += green_share[buddy][threadIdx.y];
blue_share[me][threadIdx.y] += blue_share[buddy][threadIdx.y];
}
__syncthreads();
totalThreads /= 2;
}
if (me == 0){
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
atomicAdd(&total_colors[myblock].x, red_share[me][threadIdx.y]);
atomicAdd(&total_colors[myblock].y, green_share[me][threadIdx.y]);
atomicAdd(&total_colors[myblock].z, blue_share[me][threadIdx.y]);
}
}
__host__ void d_compute_component_average(RGB *pixel, int height, int width)
{
RGB *d_pixel; // Part 1: pointer to array for storing image pixel on device
hipMalloc(&d_pixel, height * width * sizeof(RGB)); // Part 2: allocate space on device
hipMemcpy(d_pixel, pixel, height * width * sizeof(RGB), hipMemcpyHostToDevice); // Part 3: copy image to device
dim3 grid, block;
block.x = X_BLOCK_SIZE;
block.y = Y_BLOCK_SIZE;
grid.x = calcBlockDim(width, block.x);
grid.y = calcBlockDim(height, block.y);
float3 *d_total_colors; // part 1: pointer to array for storing partial sums from each block
// float3::x red
// float3::y green
// float3::z blue
hipMalloc(&d_total_colors, grid.x * grid.y * sizeof(float3)); // part 2: allocate space on device
hipMemset(d_total_colors, 0, grid.x * grid.y * sizeof(float3)); // part 3: initialize array to zero
accumulate_colors << <grid, block >> >(d_pixel, height, width, d_total_colors);
float3 *h_total_colors = new float3[grid.x * grid.y]; // allocate memory on the host to collect partial sums from the device
hipMemcpy(h_total_colors, d_total_colors, grid.x * grid.y * sizeof(float3), hipMemcpyDeviceToHost); // copy partial sums from device
double total_green = 0, total_red = 0, total_blue = 0;
for (int i = 0; i < grid.x*grid.y; ++i) { // sum up all the partial sums returned from each block
total_red += h_total_colors[i].x;
total_green += h_total_colors[i].y;
total_blue += h_total_colors[i].z;
}
// Display primary color averages
cout << "Red average: " << total_red / (height*width) << endl;
cout << "Green average: " << total_green / (height*width) << endl;
cout << "Blue average: " << total_blue / (height*width) << endl;
}
|
78fa6d773e3ed8357347d5500ad578a9687c0eb4.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_helper_funcs.h"
#include "RGB.h"
#include <iostream>
using namespace std;
// Block dimensions
#define X_BLOCK_SIZE 32
#define Y_BLOCK_SIZE 32
// Shared variables: fast memory for each block
__shared__ float red_share[X_BLOCK_SIZE][Y_BLOCK_SIZE], green_share[X_BLOCK_SIZE][Y_BLOCK_SIZE], blue_share[X_BLOCK_SIZE][Y_BLOCK_SIZE];
__global__ void accumulate_colors(RGB *d_pixels, int height, int width, float3 *total_colors)
{
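// Each in-range thread stages its pixel in shared memory; the block then tree-reduces
// along threadIdx.x for every row (fixed threadIdx.y), and lane 0 of each row
// atomically adds its row total into this block's entry of total_colors.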
int x = blockIdx.x * blockDim.x + threadIdx.x; // width index
int y = blockIdx.y * blockDim.y + threadIdx.y; // height index
if (y >= height || x >= width) // index is not within the image
return;
int index = y * width + x; // index into pixel buffer
int totalThreads = blockDim.x;
int me = threadIdx.x;
red_share[me][threadIdx.y] = d_pixels[index].red;
green_share[me][threadIdx.y] = d_pixels[index].green;
blue_share[me][threadIdx.y] = d_pixels[index].blue;
__syncthreads(); //language extension not actually a function
while (totalThreads > 1){
int buddy_line = totalThreads / 2;
if (me < buddy_line){
int buddy = me + buddy_line;
red_share[me][threadIdx.y] += red_share[buddy][threadIdx.y];
green_share[me][threadIdx.y] += green_share[buddy][threadIdx.y];
blue_share[me][threadIdx.y] += blue_share[buddy][threadIdx.y];
}
__syncthreads();
totalThreads /= 2;
}
if (me == 0){
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
atomicAdd(&total_colors[myblock].x, red_share[me][threadIdx.y]);
atomicAdd(&total_colors[myblock].y, green_share[me][threadIdx.y]);
atomicAdd(&total_colors[myblock].z, blue_share[me][threadIdx.y]);
}
}
__host__ void d_compute_component_average(RGB *pixel, int height, int width)
{
RGB *d_pixel; // Part 1: pointer to array for storing image pixel on device
cudaMalloc(&d_pixel, height * width * sizeof(RGB)); // Part 2: allocate space on device
cudaMemcpy(d_pixel, pixel, height * width * sizeof(RGB), cudaMemcpyHostToDevice); // Part 3: copy image to device
dim3 grid, block;
block.x = X_BLOCK_SIZE;
block.y = Y_BLOCK_SIZE;
grid.x = calcBlockDim(width, block.x);
grid.y = calcBlockDim(height, block.y);
float3 *d_total_colors; // part 1: pointer to array for storing partial sums from each block
// float3::x red
// float3::y green
// float3::z blue
cudaMalloc(&d_total_colors, grid.x * grid.y * sizeof(float3)); // part 2: allocate space on device
cudaMemset(d_total_colors, 0, grid.x * grid.y * sizeof(float3)); // part 3: initialize array to zero
accumulate_colors << <grid, block >> >(d_pixel, height, width, d_total_colors);
float3 *h_total_colors = new float3[grid.x * grid.y]; // allocate memory on the host to collect partial sums from the device
cudaMemcpy(h_total_colors, d_total_colors, grid.x * grid.y * sizeof(float3), cudaMemcpyDeviceToHost); // copy partial sums from device
double total_green = 0, total_red = 0, total_blue = 0;
for (int i = 0; i < grid.x*grid.y; ++i) { // sum up all the partial sums returned from each block
total_red += h_total_colors[i].x;
total_green += h_total_colors[i].y;
total_blue += h_total_colors[i].z;
}
// Display primary color averages
cout << "Red average: " << total_red / (height*width) << endl;
cout << "Green average: " << total_green / (height*width) << endl;
cout << "Blue average: " << total_blue / (height*width) << endl;
}
|
cefe9946cbccdb6e79a5e8e5f941d6b3d3faf6b5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
__global__ void matrixMult(const double *A, const double *B, double *C, int K, int N)
{
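// Naive one-thread-per-element multiply: thread (i0, j0) accumulates
// C[i0*N + j0] = sum over k of A[i0*K + k] * B[k*N + j0]. There is no bounds check,
// so M and N must be multiples of the block size.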
int i0 = blockDim.y * blockIdx.y + threadIdx.y;
int j0 = blockDim.x * blockIdx.x + threadIdx.x;
double sum = 0;
for (int k = 0; k < K; k++)
sum += A[i0 * K + k] * B[k * N + j0];
C[N * i0 + j0] = sum;
}
void init_matrix_rnd(double* &matrix, int number_row, int number_col)
{
for (size_t i = 0; i < number_row * number_col; i++)
matrix[i] = double(rand()) / double(1000);
}
int main()
{
//start, stop - for Kernel time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// number of rows and columns of matrices A[MxK] and B[KxN]
int M = 32, K = 48, N = 32;
// The sizes of matrices A and B must be evenly divisible by the block size.
size_t Asize = M * K * sizeof(double);
size_t Bsize = K * N * sizeof(double);
size_t Csize = M * N * sizeof(double);
double *h_A = (double *)malloc(Asize);
double *h_B = (double *)malloc(Bsize);
double *h_C = (double *)malloc(Csize);
init_matrix_rnd(h_A, M, K);
init_matrix_rnd(h_B, K, N);
double *d_A = NULL;
hipMalloc((void **)&d_A, Asize);
double *d_B = NULL;
hipMalloc((void **)&d_B, Bsize);
double * d_C = NULL;
hipMalloc((void **)&d_C, Csize);
hipMemcpy(d_A, h_A, Asize, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, Bsize, hipMemcpyHostToDevice);
dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocksPerGrid = dim3(N / BLOCK_SIZE, M / BLOCK_SIZE);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( matrixMult), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, K, N);
hipEventRecord( stop, 0);
hipEventSynchronize( stop );
float KernelTime;
hipEventElapsedTime( &KernelTime, start, stop);
printf("KernelTime: %.2f milliseconds\n", KernelTime);
hipMemcpy(h_C, d_C, Csize, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
hipEventDestroy( start );
hipEventDestroy( stop );
return 0;
}
|
cefe9946cbccdb6e79a5e8e5f941d6b3d3faf6b5.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
__global__ void matrixMult(const double *A, const double *B, double *C, int K, int N)
{
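// Naive one-thread-per-element multiply: thread (i0, j0) accumulates
// C[i0*N + j0] = sum over k of A[i0*K + k] * B[k*N + j0]. There is no bounds check,
// so M and N must be multiples of the block size.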
int i0 = blockDim.y * blockIdx.y + threadIdx.y;
int j0 = blockDim.x * blockIdx.x + threadIdx.x;
double sum = 0;
for (int k = 0; k < K; k++)
sum += A[i0 * K + k] * B[k * N + j0];
C[N * i0 + j0] = sum;
}
void init_matrix_rnd(double* &matrix, int number_row, int number_col)
{
for (size_t i = 0; i < number_row * number_col; i++)
matrix[i] = double(rand()) / double(1000);
}
int main()
{
//start, stop - for Kernel time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// number of rows and columns of matrices A[MxK] and B[KxN]
int M = 32, K = 48, N = 32;
// The sizes of matrices A and B must be evenly divisible by the block size.
size_t Asize = M * K * sizeof(double);
size_t Bsize = K * N * sizeof(double);
size_t Csize = M * N * sizeof(double);
double *h_A = (double *)malloc(Asize);
double *h_B = (double *)malloc(Bsize);
double *h_C = (double *)malloc(Csize);
init_matrix_rnd(h_A, M, K);
init_matrix_rnd(h_B, K, N);
double *d_A = NULL;
cudaMalloc((void **)&d_A, Asize);
double *d_B = NULL;
cudaMalloc((void **)&d_B, Bsize);
double * d_C = NULL;
cudaMalloc((void **)&d_C, Csize);
cudaMemcpy(d_A, h_A, Asize, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, Bsize, cudaMemcpyHostToDevice);
dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocksPerGrid = dim3(N / BLOCK_SIZE, M / BLOCK_SIZE);
cudaEventRecord(start, 0);
matrixMult<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, K, N);
cudaEventRecord( stop, 0);
cudaEventSynchronize( stop );
float KernelTime;
cudaEventElapsedTime( &KernelTime, start, stop);
printf("KernelTime: %.2f milliseconds\n", KernelTime);
cudaMemcpy(h_C, d_C, Csize, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
cudaEventDestroy( start );
cudaEventDestroy( stop );
return 0;
}
|
f00253ed40923e7de476b7fcedb3e2a8300ab372.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdexcept>
__global__ void kernel() { printf("The kernel ran!\n"); }
void test_cudaLaunchKernel()
{
hipStream_t stream;
hipStreamCreate(&stream);
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, stream, );
hipError_t err{hipDeviceSynchronize()};
if (err != hipSuccess) { throw std::runtime_error("Kernel failed on non-default stream!"); }
err = hipGetLastError();
if (err != hipSuccess) { throw std::runtime_error("Kernel failed on non-default stream!"); }
try {
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, );
} catch (std::runtime_error&) {
return;
}
throw std::runtime_error("No exception raised for kernel on default stream!");
}
int main() { test_cudaLaunchKernel(); }
|
f00253ed40923e7de476b7fcedb3e2a8300ab372.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdexcept>
__global__ void kernel() { printf("The kernel ran!\n"); }
void test_cudaLaunchKernel()
{
cudaStream_t stream;
cudaStreamCreate(&stream);
kernel<<<1, 1, 0, stream>>>();
cudaError_t err{cudaDeviceSynchronize()};
if (err != cudaSuccess) { throw std::runtime_error("Kernel failed on non-default stream!"); }
err = cudaGetLastError();
if (err != cudaSuccess) { throw std::runtime_error("Kernel failed on non-default stream!"); }
try {
kernel<<<1, 1>>>();
} catch (std::runtime_error&) {
return;
}
throw std::runtime_error("No exception raised for kernel on default stream!");
}
int main() { test_cudaLaunchKernel(); }
|
fb729732f7e193095a4ed4bc57609232f722a681.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Code made by Tomas Alejandro Lugo Salinas
// for Hw7 of the Multiprocessors lecture.
// Compiled in Windows 10 with: nvcc -Xcompiler "/openmp" -arch=sm_61 main.cu -o main.exe
// Executed in Windows 10 as: main.exe
#include <stdio.h>
#include <time.h>
#include <omp.h>
const long kCantidadIntervalos = 1000000000;
void originalPi(const long cantidad_intervalos, const int times = 1) {
printf("** Running the original code %d times **\n", times);
long total_time = 0;
double baseIntervalo;
double fdx;
double acum = 0;
clock_t start, end;
for(int iteration = 0; iteration < times; ++iteration) {
double x = 0;
long i;
baseIntervalo = 1.0 / cantidad_intervalos;
start = clock();
for (i = 0; i < cantidad_intervalos; i++) {
x = (i+0.5)*baseIntervalo;
fdx = 4 / (1 + x * x);
acum += fdx;
}
acum *= baseIntervalo;
end = clock();
total_time += (end - start);
printf("Result = %20.18lf (%ld)\n", acum, end - start);
}
printf("** The average of %d runs was: %ld **\n\n", times, total_time / times);
}
/////////////////// [ Begins code inspired from the internet ] //////////////////////
// The link of the reduction function guide is: https://riptutorial.com/cuda/example/22460/single-warp-parallel-reduction-for-commutative-operator
// Also, please note that floating-point addition is not associative, so reordering the summation introduces small rounding differences: https://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html
static const int warpSize = 32;
static const int blockSize = 1024;
__device__ int sumCommSingleWarp(volatile double* shArr) {
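// Warp-level tree reduction on 32 doubles held in (volatile) shared memory: the first
// 16 lanes fold in their partner at strides 16, 8, 4, 2, 1, leaving the warp's total
// in shArr[0]; correctness relies on warp-synchronous execution.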
const int idx = threadIdx.x % warpSize; // The lane index in the warp
if (idx < 16) {
shArr[idx] += shArr[idx + 16];
shArr[idx] += shArr[idx + 8];
shArr[idx] += shArr[idx + 4];
shArr[idx] += shArr[idx + 2];
shArr[idx] += shArr[idx + 1];
}
return shArr[0];
}
__global__ void singleReduction(const int arraySize, const double *a, double *out) {
const int idx = threadIdx.x;
double sum = 0;
for (int i = idx; i < arraySize; i += blockSize)
sum += a[i];
__shared__ double r[blockSize];
r[idx] = sum;
sumCommSingleWarp(&r[idx & ~(warpSize-1)]);
__syncthreads();
if (idx<warpSize) { //first warp only
r[idx] = idx * warpSize < blockSize ? r[idx*warpSize] : 0;
sumCommSingleWarp(r);
if (idx == 0)
*out = r[0];
}
}
/////////////////// [ Ends code inspired from the internet ] //////////////////////
__global__ void singleGPUPi(const long cantidad_intervalos, const int total_threads, const int thread_per_block, double *acum_arr) {
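// Each thread integrates 4/(1+x^2) over its own contiguous run of sub-intervals with
// a left-endpoint Riemann sum and writes its partial result to acum_arr[idx]; the
// per-thread partial sums are combined afterwards (on the host or by singleReduction).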
const double base_intervalo = 1.0 / cantidad_intervalos;
const int idx = threadIdx.x + (blockIdx.x * thread_per_block);
const long intervalos_local = cantidad_intervalos / total_threads;
double x = base_intervalo * intervalos_local * idx;
double fdx;
double local_acum = 0;
for (long i = 0; i < intervalos_local; i++) {
fdx = 4.0 / (1 + x * x);
local_acum += (fdx * base_intervalo);
x = x + base_intervalo;
}
acum_arr[idx] = local_acum;
}
__global__ void singleGPUPiAtomic(const long cantidad_intervalos, const int total_threads, const int thread_per_block, double *acum) {
const double base_intervalo = 1.0 / cantidad_intervalos;
const int idx = threadIdx.x + (blockIdx.x * thread_per_block);
const long intervalos_local = cantidad_intervalos / total_threads;
double x = base_intervalo * intervalos_local * idx;
double fdx;
double local_acum = 0;
for (long i = 0; i < intervalos_local; i++) {
fdx = 4.0 / (1 + x * x);
local_acum += (fdx * base_intervalo);
x = x + base_intervalo;
}
atomicAdd(acum, local_acum);
}
void gpuPiWithReduction(const long cantidad_intervalos, const int num_blocks, const int num_threads, const int times = 1) {
printf("** Running the gpu code with reduction %d times **\n", times);
printf("* # of blocks: %d\n", num_blocks);
printf("* # of threads: %d\n", num_threads);
long total_time = 0;
clock_t start, end;
for(int iteration = 0; iteration < times; ++iteration) {
start = clock();
const int total_size = num_blocks * num_threads;
double *acum_arr = nullptr;
hipMallocManaged(&acum_arr, sizeof(double) * total_size);
hipLaunchKernelGGL(( singleGPUPi), dim3(num_blocks), dim3(num_threads), 0, 0, cantidad_intervalos, total_size, num_threads, acum_arr);
hipDeviceSynchronize();
double *acum = nullptr;
double final_acum = 0;
hipMallocManaged(&acum, sizeof(double));
hipLaunchKernelGGL(( singleReduction), dim3(1), dim3(num_threads), 0, 0, total_size, acum_arr, acum);
hipDeviceSynchronize();
final_acum = *acum;
hipFree(acum_arr);
hipFree(acum);
end = clock();
total_time += (end - start);
printf("Result = %20.18lf (%ld)\n", final_acum, end - start);
}
printf("** The average of %d runs was: %ld **\n\n", times, total_time / times);
}
void gpuPiWithoutReduction(const long cantidad_intervalos, const int num_blocks, const int num_threads, const int times = 1) {
printf("** Running the gpu code without reduction %d times **\n", times);
printf("* # of blocks: %d\n", num_blocks);
printf("* # of threads: %d\n", num_threads);
long total_time = 0;
clock_t start, end;
for(int iteration = 0; iteration < times; ++iteration) {
start = clock();
const int total_size = num_blocks * num_threads;
double *acum_arr = nullptr;
hipMallocManaged(&acum_arr, sizeof(double) * total_size);
hipLaunchKernelGGL(( singleGPUPi), dim3(num_blocks), dim3(num_threads), 0, 0, cantidad_intervalos, total_size, num_threads, acum_arr);
hipDeviceSynchronize();
double final_acum = 0;
for(int i = 0; i < total_size; ++i) {
final_acum += acum_arr[i];
}
hipFree(acum_arr);
end = clock();
total_time += (end - start);
printf("Result = %20.18lf (%ld)\n", final_acum, end - start);
}
printf("** The average of %d runs was: %ld **\n\n", times, total_time / times);
}
void gpuPiWithOMPReduction(const long cantidad_intervalos, const int num_blocks, const int num_threads, const int times = 1) {
printf("** Running the gpu code with OMP reduction %d times **\n", times);
printf("* # of blocks: %d\n", num_blocks);
printf("* # of threads: %d\n", num_threads);
long total_time = 0;
clock_t start, end;
for(int iteration = 0; iteration < times; ++iteration) {
start = clock();
const int total_size = num_blocks * num_threads;
double *acum_arr = nullptr;
hipMallocManaged(&acum_arr, sizeof(double) * total_size);
hipLaunchKernelGGL(( singleGPUPi), dim3(num_blocks), dim3(num_threads), 0, 0, cantidad_intervalos, total_size, num_threads, acum_arr);
hipDeviceSynchronize();
double final_acum = 0;
#pragma omp parallel for reduction(+:final_acum)
for(int i = 0; i < total_size; ++i) {
final_acum += acum_arr[i];
}
hipFree(acum_arr);
end = clock();
total_time += (end - start);
printf("Result = %20.18lf (%ld)\n", final_acum, end - start);
}
printf("** The average of %d runs was: %ld **\n\n", times, total_time / times);
}
void gpuPiAtomic(const long cantidad_intervalos, const int num_blocks, const int num_threads, const int times = 1) {
printf("** Running the gpu code Atomic %d times **\n", times);
printf("* # of blocks: %d\n", num_blocks);
printf("* # of threads: %d\n", num_threads);
long total_time = 0;
clock_t start, end;
for(int iteration = 0; iteration < times; ++iteration) {
start = clock();
const int total_size = num_blocks * num_threads;
double *acum = nullptr;
hipMallocManaged(&acum, sizeof(double));
hipLaunchKernelGGL(( singleGPUPiAtomic), dim3(num_blocks), dim3(num_threads), 0, 0, cantidad_intervalos, total_size, num_threads, acum);
hipDeviceSynchronize();
double final_acum = *acum;
hipFree(acum);
end = clock();
total_time += (end - start);
printf("Result = %20.18lf (%ld)\n", final_acum, end - start);
}
printf("** The average of %d runs was: %ld **\n\n", times, total_time / times);
}
int main() {
originalPi(kCantidadIntervalos, 5);
gpuPiWithReduction(kCantidadIntervalos, 16, 1024, 5);
gpuPiWithoutReduction(kCantidadIntervalos, 4, 1024, 5);
gpuPiWithOMPReduction(kCantidadIntervalos, 16, 1024, 5);
gpuPiAtomic(kCantidadIntervalos, 4, 1024, 5);
return 0;
}
|
fb729732f7e193095a4ed4bc57609232f722a681.cu
|
// Code made by Tomas Alejandro Lugo Salinas
// for Hw7 of the Multiprocessors lecture.
// Compiled in Windows 10 with: nvcc -Xcompiler "/openmp" -arch=sm_61 main.cu -o main.exe
// Executed in Windows 10 as: main.exe
#include <stdio.h>
#include <time.h>
#include <omp.h>
const long kCantidadIntervalos = 1000000000;
void originalPi(const long cantidad_intervalos, const int times = 1) {
printf("** Running the original code %d times **\n", times);
long total_time = 0;
double baseIntervalo;
double fdx;
double acum = 0;
clock_t start, end;
for(int iteration = 0; iteration < times; ++iteration) {
double x = 0;
long i;
baseIntervalo = 1.0 / cantidad_intervalos;
start = clock();
for (i = 0; i < cantidad_intervalos; i++) {
x = (i+0.5)*baseIntervalo;
fdx = 4 / (1 + x * x);
acum += fdx;
}
acum *= baseIntervalo;
end = clock();
total_time += (end - start);
printf("Result = %20.18lf (%ld)\n", acum, end - start);
}
printf("** The average of %d runs was: %ld **\n\n", times, total_time / times);
}
/////////////////// [ Begins code inspired from the internet ] //////////////////////
// The link of the reduction function guide is: https://riptutorial.com/cuda/example/22460/single-warp-parallel-reduction-for-commutative-operator
// Also, please note that floating-point addition is not associative, so reordering the summation introduces small rounding differences: https://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html
static const int warpSize = 32;
static const int blockSize = 1024;
__device__ int sumCommSingleWarp(volatile double* shArr) {
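// Warp-level tree reduction on 32 doubles held in (volatile) shared memory: the first
// 16 lanes fold in their partner at strides 16, 8, 4, 2, 1, leaving the warp's total
// in shArr[0]; correctness relies on warp-synchronous execution.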
const int idx = threadIdx.x % warpSize; // The lane index in the warp
if (idx < 16) {
shArr[idx] += shArr[idx + 16];
shArr[idx] += shArr[idx + 8];
shArr[idx] += shArr[idx + 4];
shArr[idx] += shArr[idx + 2];
shArr[idx] += shArr[idx + 1];
}
return shArr[0];
}
__global__ void singleReduction(const int arraySize, const double *a, double *out) {
const int idx = threadIdx.x;
double sum = 0;
for (int i = idx; i < arraySize; i += blockSize)
sum += a[i];
__shared__ double r[blockSize];
r[idx] = sum;
sumCommSingleWarp(&r[idx & ~(warpSize-1)]);
__syncthreads();
if (idx<warpSize) { //first warp only
r[idx] = idx * warpSize < blockSize ? r[idx*warpSize] : 0;
sumCommSingleWarp(r);
if (idx == 0)
*out = r[0];
}
}
/////////////////// [ Ends code inspired from the internet ] //////////////////////
__global__ void singleGPUPi(const long cantidad_intervalos, const int total_threads, const int thread_per_block, double *acum_arr) {
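// Each thread integrates 4/(1+x^2) over its own contiguous run of sub-intervals with
// a left-endpoint Riemann sum and writes its partial result to acum_arr[idx]; the
// per-thread partial sums are combined afterwards (on the host or by singleReduction).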
const double base_intervalo = 1.0 / cantidad_intervalos;
const int idx = threadIdx.x + (blockIdx.x * thread_per_block);
const long intervalos_local = cantidad_intervalos / total_threads;
double x = base_intervalo * intervalos_local * idx;
double fdx;
double local_acum = 0;
for (long i = 0; i < intervalos_local; i++) {
fdx = 4.0 / (1 + x * x);
local_acum += (fdx * base_intervalo);
x = x + base_intervalo;
}
acum_arr[idx] = local_acum;
}
__global__ void singleGPUPiAtomic(const long cantidad_intervalos, const int total_threads, const int thread_per_block, double *acum) {
const double base_intervalo = 1.0 / cantidad_intervalos;
const int idx = threadIdx.x + (blockIdx.x * thread_per_block);
const long intervalos_local = cantidad_intervalos / total_threads;
double x = base_intervalo * intervalos_local * idx;
double fdx;
double local_acum = 0;
for (long i = 0; i < intervalos_local; i++) {
fdx = 4.0 / (1 + x * x);
local_acum += (fdx * base_intervalo);
x = x + base_intervalo;
}
atomicAdd(acum, local_acum);
}
void gpuPiWithReduction(const long cantidad_intervalos, const int num_blocks, const int num_threads, const int times = 1) {
printf("** Running the gpu code with reduction %d times **\n", times);
printf("* # of blocks: %d\n", num_blocks);
printf("* # of threads: %d\n", num_threads);
long total_time = 0;
clock_t start, end;
for(int iteration = 0; iteration < times; ++iteration) {
start = clock();
const int total_size = num_blocks * num_threads;
double *acum_arr = nullptr;
cudaMallocManaged(&acum_arr, sizeof(double) * total_size);
singleGPUPi<<<num_blocks, num_threads>>>(cantidad_intervalos, total_size, num_threads, acum_arr);
cudaDeviceSynchronize();
double *acum = nullptr;
double final_acum = 0;
cudaMallocManaged(&acum, sizeof(double));
singleReduction<<<1, num_threads>>>(total_size, acum_arr, acum);
cudaDeviceSynchronize();
final_acum = *acum;
cudaFree(acum_arr);
cudaFree(acum);
end = clock();
total_time += (end - start);
printf("Result = %20.18lf (%ld)\n", final_acum, end - start);
}
printf("** The average of %d runs was: %ld **\n\n", times, total_time / times);
}
void gpuPiWithoutReduction(const long cantidad_intervalos, const int num_blocks, const int num_threads, const int times = 1) {
printf("** Running the gpu code without reduction %d times **\n", times);
printf("* # of blocks: %d\n", num_blocks);
printf("* # of threads: %d\n", num_threads);
long total_time = 0;
clock_t start, end;
for(int iteration = 0; iteration < times; ++iteration) {
start = clock();
const int total_size = num_blocks * num_threads;
double *acum_arr = nullptr;
cudaMallocManaged(&acum_arr, sizeof(double) * total_size);
singleGPUPi<<<num_blocks, num_threads>>>(cantidad_intervalos, total_size, num_threads, acum_arr);
cudaDeviceSynchronize();
double final_acum = 0;
for(int i = 0; i < total_size; ++i) {
final_acum += acum_arr[i];
}
cudaFree(acum_arr);
end = clock();
total_time += (end - start);
printf("Result = %20.18lf (%ld)\n", final_acum, end - start);
}
printf("** The average of %d runs was: %ld **\n\n", times, total_time / times);
}
void gpuPiWithOMPReduction(const long cantidad_intervalos, const int num_blocks, const int num_threads, const int times = 1) {
printf("** Running the gpu code with OMP reduction %d times **\n", times);
printf("* # of blocks: %d\n", num_blocks);
printf("* # of threads: %d\n", num_threads);
long total_time = 0;
clock_t start, end;
for(int iteration = 0; iteration < times; ++iteration) {
start = clock();
const int total_size = num_blocks * num_threads;
double *acum_arr = nullptr;
cudaMallocManaged(&acum_arr, sizeof(double) * total_size);
singleGPUPi<<<num_blocks, num_threads>>>(cantidad_intervalos, total_size, num_threads, acum_arr);
cudaDeviceSynchronize();
double final_acum = 0;
#pragma omp parallel for reduction(+:final_acum)
for(int i = 0; i < total_size; ++i) {
final_acum += acum_arr[i];
}
cudaFree(acum_arr);
end = clock();
total_time += (end - start);
printf("Result = %20.18lf (%ld)\n", final_acum, end - start);
}
printf("** The average of %d runs was: %ld **\n\n", times, total_time / times);
}
void gpuPiAtomic(const long cantidad_intervalos, const int num_blocks, const int num_threads, const int times = 1) {
printf("** Running the gpu code Atomic %d times **\n", times);
printf("* # of blocks: %d\n", num_blocks);
printf("* # of threads: %d\n", num_threads);
long total_time = 0;
clock_t start, end;
for(int iteration = 0; iteration < times; ++iteration) {
start = clock();
const int total_size = num_blocks * num_threads;
double *acum = nullptr;
cudaMallocManaged(&acum, sizeof(double));
singleGPUPiAtomic<<<num_blocks, num_threads>>>(cantidad_intervalos, total_size, num_threads, acum);
cudaDeviceSynchronize();
double final_acum = *acum;
cudaFree(acum);
end = clock();
total_time += (end - start);
printf("Result = %20.18lf (%ld)\n", final_acum, end - start);
}
printf("** The average of %d runs was: %ld **\n\n", times, total_time / times);
}
int main() {
originalPi(kCantidadIntervalos, 5);
gpuPiWithReduction(kCantidadIntervalos, 16, 1024, 5);
gpuPiWithoutReduction(kCantidadIntervalos, 4, 1024, 5);
gpuPiWithOMPReduction(kCantidadIntervalos, 16, 1024, 5);
gpuPiAtomic(kCantidadIntervalos, 4, 1024, 5);
return 0;
}
|
3e5f807d0c86ff3eaed64c1293e1e4c8cc348049.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//note: please do not modify this file manually!
// this file has been generated automatically by BOAST version 0.99996
// by: make boast_kernels
/*
!=====================================================================
!
! S p e c f e m 3 D G l o b e V e r s i o n 7 . 0
! --------------------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA
! and CNRS / University of Marseille, France
! (there are currently many more authors!)
! (c) Princeton University and CNRS / University of Marseille, April 2014
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#ifndef INDEX2
#define INDEX2(isize,i,j) i + isize*j
#endif
#ifndef INDEX3
#define INDEX3(isize,jsize,i,j,k) i + isize*(j + jsize*k)
#endif
#ifndef INDEX4
#define INDEX4(isize,jsize,ksize,i,j,k,x) i + isize*(j + jsize*(k + ksize*x))
#endif
#ifndef INDEX5
#define INDEX5(isize,jsize,ksize,xsize,i,j,k,x,y) i + isize*(j + jsize*(k + ksize*(x + xsize*y)))
#endif
#ifndef NDIM
#define NDIM 3
#endif
#ifndef NGLLX
#define NGLLX 5
#endif
#ifndef NGLL2
#define NGLL2 25
#endif
#ifndef NGLL3
#define NGLL3 125
#endif
#ifndef NGLL3_PADDED
#define NGLL3_PADDED 128
#endif
#ifndef N_SLS
#define N_SLS 3
#endif
#ifndef IREGION_CRUST_MANTLE
#define IREGION_CRUST_MANTLE 1
#endif
#ifndef IREGION_INNER_CORE
#define IREGION_INNER_CORE 3
#endif
#ifndef IFLAG_IN_FICTITIOUS_CUBE
#define IFLAG_IN_FICTITIOUS_CUBE 11
#endif
#ifndef R_EARTH_KM
#define R_EARTH_KM 6371.0f
#endif
#ifndef COLORING_MIN_NSPEC_INNER_CORE
#define COLORING_MIN_NSPEC_INNER_CORE 1000
#endif
#ifndef COLORING_MIN_NSPEC_OUTER_CORE
#define COLORING_MIN_NSPEC_OUTER_CORE 1000
#endif
#ifndef BLOCKSIZE_TRANSFER
#define BLOCKSIZE_TRANSFER 256
#endif
__global__ void compute_strength_noise_kernel(const float * displ, const int * ibelm_top, const int * ibool, const float * noise_surface_movie, const float * normal_x_noise, const float * normal_y_noise, const float * normal_z_noise, float * Sigma_kl, const float deltat, const int nspec_top){
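// One block per top-surface element, one thread per GLL point of its top face: eta
// projects the stored surface wavefield onto the noise normal, and the strength noise
// kernel Sigma_kl is incremented by deltat * eta * (normal . displ) at the
// corresponding global point.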
int iface;
int ispec;
int igll;
int ipoin;
int i;
int j;
int k;
int iglob;
float eta;
iface = blockIdx.x + (blockIdx.y) * (gridDim.x);
if (iface < nspec_top) {
ispec = ibelm_top[iface] - (1);
igll = threadIdx.x;
ipoin = igll + (NGLL2) * (iface);
k = NGLLX - (1);
j = (igll) / (NGLLX);
i = igll - ((j) * (NGLLX));
iglob = ibool[INDEX4(NGLLX, NGLLX, NGLLX, i, j, k, ispec)] - (1);
eta = (noise_surface_movie[INDEX3(NDIM, NGLL2, 0, igll, iface)]) * (normal_x_noise[ipoin]) + (noise_surface_movie[INDEX3(NDIM, NGLL2, 1, igll, iface)]) * (normal_y_noise[ipoin]) + (noise_surface_movie[INDEX3(NDIM, NGLL2, 2, igll, iface)]) * (normal_z_noise[ipoin]);
Sigma_kl[INDEX4(NGLLX, NGLLX, NGLLX, i, j, k, ispec)] = Sigma_kl[INDEX4(NGLLX, NGLLX, NGLLX, i, j, k, ispec)] + ((deltat) * (eta)) * ((normal_x_noise[ipoin]) * (displ[0 + (3) * (iglob)]) + (normal_y_noise[ipoin]) * (displ[1 + (3) * (iglob)]) + (normal_z_noise[ipoin]) * (displ[2 + (3) * (iglob)]));
}
}
|
3e5f807d0c86ff3eaed64c1293e1e4c8cc348049.cu
|
//note: please do not modify this file manually!
// this file has been generated automatically by BOAST version 0.99996
// by: make boast_kernels
/*
!=====================================================================
!
! S p e c f e m 3 D G l o b e V e r s i o n 7 . 0
! --------------------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA
! and CNRS / University of Marseille, France
! (there are currently many more authors!)
! (c) Princeton University and CNRS / University of Marseille, April 2014
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#ifndef INDEX2
#define INDEX2(isize,i,j) i + isize*j
#endif
#ifndef INDEX3
#define INDEX3(isize,jsize,i,j,k) i + isize*(j + jsize*k)
#endif
#ifndef INDEX4
#define INDEX4(isize,jsize,ksize,i,j,k,x) i + isize*(j + jsize*(k + ksize*x))
#endif
#ifndef INDEX5
#define INDEX5(isize,jsize,ksize,xsize,i,j,k,x,y) i + isize*(j + jsize*(k + ksize*(x + xsize*y)))
#endif
#ifndef NDIM
#define NDIM 3
#endif
#ifndef NGLLX
#define NGLLX 5
#endif
#ifndef NGLL2
#define NGLL2 25
#endif
#ifndef NGLL3
#define NGLL3 125
#endif
#ifndef NGLL3_PADDED
#define NGLL3_PADDED 128
#endif
#ifndef N_SLS
#define N_SLS 3
#endif
#ifndef IREGION_CRUST_MANTLE
#define IREGION_CRUST_MANTLE 1
#endif
#ifndef IREGION_INNER_CORE
#define IREGION_INNER_CORE 3
#endif
#ifndef IFLAG_IN_FICTITIOUS_CUBE
#define IFLAG_IN_FICTITIOUS_CUBE 11
#endif
#ifndef R_EARTH_KM
#define R_EARTH_KM 6371.0f
#endif
#ifndef COLORING_MIN_NSPEC_INNER_CORE
#define COLORING_MIN_NSPEC_INNER_CORE 1000
#endif
#ifndef COLORING_MIN_NSPEC_OUTER_CORE
#define COLORING_MIN_NSPEC_OUTER_CORE 1000
#endif
#ifndef BLOCKSIZE_TRANSFER
#define BLOCKSIZE_TRANSFER 256
#endif
__global__ void compute_strength_noise_kernel(const float * displ, const int * ibelm_top, const int * ibool, const float * noise_surface_movie, const float * normal_x_noise, const float * normal_y_noise, const float * normal_z_noise, float * Sigma_kl, const float deltat, const int nspec_top){
int iface;
int ispec;
int igll;
int ipoin;
int i;
int j;
int k;
int iglob;
float eta;
iface = blockIdx.x + (blockIdx.y) * (gridDim.x);
if (iface < nspec_top) {
ispec = ibelm_top[iface] - (1);
igll = threadIdx.x;
ipoin = igll + (NGLL2) * (iface);
k = NGLLX - (1);
j = (igll) / (NGLLX);
i = igll - ((j) * (NGLLX));
iglob = ibool[INDEX4(NGLLX, NGLLX, NGLLX, i, j, k, ispec)] - (1);
eta = (noise_surface_movie[INDEX3(NDIM, NGLL2, 0, igll, iface)]) * (normal_x_noise[ipoin]) + (noise_surface_movie[INDEX3(NDIM, NGLL2, 1, igll, iface)]) * (normal_y_noise[ipoin]) + (noise_surface_movie[INDEX3(NDIM, NGLL2, 2, igll, iface)]) * (normal_z_noise[ipoin]);
Sigma_kl[INDEX4(NGLLX, NGLLX, NGLLX, i, j, k, ispec)] = Sigma_kl[INDEX4(NGLLX, NGLLX, NGLLX, i, j, k, ispec)] + ((deltat) * (eta)) * ((normal_x_noise[ipoin]) * (displ[0 + (3) * (iglob)]) + (normal_y_noise[ipoin]) * (displ[1 + (3) * (iglob)]) + (normal_z_noise[ipoin]) * (displ[2 + (3) * (iglob)]));
}
}
|
ea148f9af02dae1f2b541128fbbbb66a0cd8f066.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda/helper_math.h"
#include "cuda/cudaNoise/cudaNoise.cuh"
__device__ const float timestep = %(sim_timestep)s;
__device__ const float SIM_HEIGHT_ABOVE = 0.2f;
__device__ const float SIM_HEIGHT_BELOW = 0.1f;
/*
* 3D -> 1D index in contiguous array
*/
__device__ uint make_idx(uint i, uint j, uint k)
{
return i * %(sim_h)s * %(sim_d)s + j * %(sim_d)s + k;
}
/*
* 2D -> 1D index in contiguous array
*/
__device__ uint make_flat_idx(uint i, uint j)
{
return i * %(sim_h)s + j;
}
// Simulation space spans heights from -0.1 below the lowest point to +0.2 above the highest one
__device__ float make_height(uint k)
{
return float(k) / %(sim_d)s * (1.0f + SIM_HEIGHT_ABOVE + SIM_HEIGHT_BELOW) - SIM_HEIGHT_BELOW;
}
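/*
 * Illustrative only -- sim_d is a template parameter and the value below is an
 * assumption: with sim_d = 64 the formula height = k / sim_d * 1.3 - 0.1 maps
 * k = 0 to -0.1 (bottom of the buffer below the terrain), k = 32 to 0.55, and
 * k = 64 to 1.2 (top of the buffer above the terrain).
 */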
/*
* Returns surface (ground or water) height at the location
*/
__device__ float make_surf_height(uint i, uint j, float *surface_height)
{
return max(surface_height[make_flat_idx(i, j)], %(sea_level)s);
}
/*
* Returns ground height at the location
*/
__device__ float make_ground_height(uint i, uint j, float *surface_height)
{
return surface_height[make_flat_idx(i, j)];
}
/*
* Calculates real-scale distance between simulation cells
*/
__device__ float make_cell_dist(int si, int sj, int sk,
int ti, int tj, int tk)
{
return sqrtf(powf((ti-si) / %(sim_d_scale)s * %(cell_scale)s, 2) + powf((tj-sj) / %(sim_d_scale)s * %(cell_scale)s, 2) + powf((tk-sk) * %(cell_scale)s, 2));
}
/*
* Tells whether the cell is right at the surface (ground or water)
*/
__device__ bool is_surface(uint i, uint j, uint k, float *surface_height) {
if(make_height(k) < make_surf_height(i, j, surface_height)) {
if(
(i > 0 && make_height(k+1) >= make_surf_height(i-1, j, surface_height))
|| (i < %(sim_w)s-1 && make_height(k+1) >= make_surf_height(i+1, j, surface_height))
|| (j > 0 && make_height(k+1) >= make_surf_height(i, j-1, surface_height))
|| (j < %(sim_h)s-1 && make_height(k+1) >= make_surf_height(i, j+1, surface_height))
) {
return true;
}
}
return false;
}
/*
* Tells whether the cell is at the surface or above
*/
__device__ bool is_surface_or_above(uint i, uint j, uint k, float *surface_height) {
return is_surface(i, j, k, surface_height) || make_height(k) >= make_surf_height(i, j, surface_height);
}
/*
* Tells whether the cell is a boundary cell: on the domain edge, or below the surface without being a surface cell
*/
__device__ bool is_boundary(uint i, uint j, uint k, float *surface_height) {
if(min(i %% (%(sim_w)s-1), 1) + min(j %% (%(sim_h)s-1), 1) + min(k %% (%(sim_d)s-1), 1) < 3
|| (make_height(k) < make_surf_height(i, j, surface_height) && !is_surface(i, j, k, surface_height))) {
return true;
}
return false;
}
/*
* Returns the value trilinearly interpolated at real-valued simulation coordinates
*/
template<typename T>
__device__ T interp3(float i, float j, float k, T *f) {
float fraci = i - floorf(i);
float fracj = j - floorf(j);
float frack = k - floorf(k);
T f000 = f[make_idx( (int)floorf(i), (int)floorf(j), (int)floorf(k) )];
T f001 = f[make_idx( (int)floorf(i), (int)floorf(j), (int)ceilf(k) )];
T f010 = f[make_idx( (int)floorf(i), (int)ceilf(j), (int)floorf(k) )];
T f011 = f[make_idx( (int)floorf(i), (int)ceilf(j), (int)ceilf(k) )];
T f100 = f[make_idx( (int)ceilf(i), (int)floorf(j), (int)floorf(k) )];
T f101 = f[make_idx( (int)ceilf(i), (int)floorf(j), (int)ceilf(k) )];
T f110 = f[make_idx( (int)ceilf(i), (int)ceilf(j), (int)floorf(k) )];
T f111 = f[make_idx( (int)ceilf(i), (int)ceilf(j), (int)ceilf(k) )];
return lerp(
lerp(
lerp(f000, f001, frack),
lerp(f010, f011, frack),
fracj
),
lerp(
lerp(f100, f101, frack),
lerp(f110, f111, frack),
fracj
),
fraci
);
}
/*
* Jacobi iteration method for solving Poisson equations
*/
template<typename T>
__device__ void jacobi(float alpha, float rBeta, T *x, T *next_x, T *b, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
T x001 = x[make_idx(i, j, k-1)];
T x002 = x[make_idx(i, j, k+1)];
T x010 = x[make_idx(i, j-1, k)];
T x020 = x[make_idx(i, j+1, k)];
T x100 = x[make_idx(i-1, j, k)];
T x200 = x[make_idx(i+1, j, k)];
T v = b[make_idx(i, j, k)];
T r = (x001 + x002 + x010 + x020 + x100 + x200 + alpha * v) * rBeta;
next_x[make_idx(i, j, k)] = r;
}
/*
* Jacobi iteration method for solving Poisson equations, when the unknown is the air velocity field itself
*/
template<typename T>
__device__ void jacobi_auto(float alpha, float rBeta, T *x, T *next_x, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
T x001 = x[make_idx(i, j, k-1)];
T x002 = x[make_idx(i, j, k+1)];
T x010 = x[make_idx(i, j-1, k)];
T x020 = x[make_idx(i, j+1, k)];
T x100 = x[make_idx(i-1, j, k)];
T x200 = x[make_idx(i+1, j, k)];
T v = x[make_idx(i, j, k)];
T r = (x001 + x002 + x010 + x020 + x100 + x200 + alpha * v) * rBeta;
next_x[make_idx(i, j, k)] = r;
}
extern "C" {
/*
* Zeroes out the tensor
*/
__global__ void zero3d(float *x) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
x[make_idx(i, j, k)] = 0.0f;
}
__global__ void copy3d(float *from, float *to) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
to[make_idx(i, j, k)] = from[make_idx(i, j, k)];
}
/*
* Transport a matter attribute using the velocity field
*/
__global__ void advect(float rdx, float3 *velocity, float *m, float *next_m, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float3 v = velocity[make_idx(i, j, k)];
float3 p = make_float3(float(i), float(j), float(k));
p -= v * rdx * timestep;
// Clamp coords
p.x = fminf(fmaxf(p.x, 0.0f), %(sim_w)s-1.0f);
p.y = fminf(fmaxf(p.y, 0.0f), %(sim_h)s-1.0f);
p.z = fminf(fmaxf(p.z, 0.0f), %(sim_d)s-1.0f);
next_m[make_idx(i, j, k)] = interp3(p.x, p.y, p.z, m);
}
/*
* Transport the velocity field itself
*/
__global__ void advect_self(float rdx, float3 *velocity, float3 *next_m, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float3 v = velocity[make_idx(i, j, k)];
float3 p = make_float3(float(i), float(j), float(k));
p -= v * rdx * timestep;
// Clamp coords
p.x = fminf(fmaxf(p.x, 0.0f), %(sim_w)s-1.0f);
p.y = fminf(fmaxf(p.y, 0.0f), %(sim_h)s-1.0f);
p.z = fminf(fmaxf(p.z, 0.0f), %(sim_d)s-1.0f);
next_m[make_idx(i, j, k)] = interp3(p.x, p.y, p.z, velocity);
}
__global__ void jacobi_f(float alpha, float rBeta, float *x, float *next_x, float *b, float *surface_height) {
jacobi(alpha, rBeta, x, next_x, b, surface_height);
}
__global__ void jacobi_f3(float alpha, float rBeta, float3 *x, float3 *next_x, float *surface_height) {
jacobi_auto(alpha, rBeta, x, next_x, surface_height);
}
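/*
 * Usage sketch (an assumption about the host-side driver, not code taken from
 * this file): the Jacobi kernels are intended to be launched repeatedly while
 * ping-ponging two buffers, e.g. for a pressure solve with the right-hand side
 * d produced by the divergence kernel below:
 *
 * alpha = -1.0 / (rdx * rdx), rBeta = 1.0 / 6.0 (illustrative coefficients)
 * repeat roughly 20-40 times:
 * jacobi_f(alpha, rBeta, p, p_next, d, surface_height) over the whole grid
 * boundary_f(1.0, p_next, surface_height)
 * swap(p, p_next)
 */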
/*
* Sun heating the ground, air cooling with ascent, heat outflux into outer space
*/
__global__ void temperature_flux(float *temperature, float *next_temperature, float3 *velocity,
float *surface_height, float sim_time, float *humidity,
float *precipitation, float *last_precipitation) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float sheight = make_height(k);
float flux = 0.0f;
// Different kinds of temperature dynamics
//float time_flux = (sin(sim_time * 0.05f + float(i) * 0.01f) + 1.0f) / 2.0f;
float time_flux = 1.0f; //max(sin(sim_time * 0.03f + float(i) * 0.02f), 0.0f); // + float(j) * 0.008f
//time_flux = (sin(sim_time * 0.1f) + 1.0f) / 2.0f;
time_flux = (0.1f + 0.9f * (float(j) / %(sim_h)s)) // Sphericity - poles are colder
* (sin(sim_time * 0.1f + float(i) * 0.02f) + 1.0f) / 2.0f; // Day-night cycles
if(make_ground_height(i, j, surface_height) >= %(sea_level)s) {
// How much rain in this column?
float col_precipitation = 0.0f;
for(int l = 0; l < %(sim_d)s; l++) {
col_precipitation += precipitation[make_idx(i, j, l)] - last_precipitation[make_idx(i, j, l)];
}
// Sun heating the ground
const float SUN_HEAT = 0.5f;
flux += SUN_HEAT * (1.0f / pow(max((sheight - make_surf_height(i, j, surface_height)) * %(sim_h)s, 1.0f), 2.0f)) * timestep * time_flux;
flux -= 0.1f * col_precipitation * timestep * time_flux;
} else if(sheight < make_surf_height(i, j, surface_height) && sheight <= %(sea_level)s) {
// How much rain in this column?
float col_precipitation = 0.0f;
for(int l = 0; l < %(sim_d)s; l++) {
col_precipitation += precipitation[make_idx(i, j, l)] - last_precipitation[make_idx(i, j, l)];
}
// Sun heating the water
const float SUN_WATER_HEAT = 0.05f;
flux += SUN_WATER_HEAT * (1.0f / pow(max((sheight - make_surf_height(i, j, surface_height)) * %(sim_h)s, 1.0f), 2.0f)) * timestep * time_flux;
//flux -= 0.1f * col_precipitation * timestep * time_flux;
}
// Cooling with ascent / heating with descent
if(velocity[make_idx(i, j, k)].z >= 0) {
const float ASCENT_COOL_RATE = 0.02f;
flux += temperature[make_idx(i, j, k)] * -velocity[make_idx(i, j, k)].z * ASCENT_COOL_RATE * timestep;
} else {
const float DESCENT_HEAT_RATE = 0.015f;
flux += temperature[make_idx(i, j, k)] * velocity[make_idx(i, j, k)].z * DESCENT_HEAT_RATE * timestep;
}
if( k == %(sim_d)s-2 ) {
// Cool air in the top cells
const float OUTFLUX_RATE = 0.005f;
flux -= temperature[make_idx(i, j, k)] * OUTFLUX_RATE * timestep;
}
next_temperature[make_idx(i, j, k)] = temperature[make_idx(i, j, k)] + flux;
}
/*
* Hot air floats, cold air falls
*/
__global__ void convection(float3 *velocity, float3 *next_velocity,
float *surface_height,
float *temperature, float ambient_temperature,
float *humidity, float ambient_humidity, float sim_time) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float stemperature = temperature[make_idx(i, j, k)];
float shumidity = humidity[make_idx(i, j, k)];
float nval = 1e-4;
float3 temperature_gradient = make_float3(0.0f, 0.0f, 0.0f);
if(!is_boundary(i+1, j, k, surface_height)) {
temperature_gradient += (stemperature - temperature[make_idx(i+1, j, k)]) * make_float3(1.0f, 0.0f, 0.0f);
nval += 1;
}
if(!is_boundary(i-1, j, k, surface_height)) {
temperature_gradient += (stemperature - temperature[make_idx(i-1, j, k)]) * make_float3(-1.0f, 0.0f, 0.0f);
nval += 1;
}
if(!is_boundary(i, j+1, k, surface_height)) {
temperature_gradient += (stemperature - temperature[make_idx(i, j+1, k)]) * make_float3(0.0f, 1.0f, 0.0f);
nval += 1;
}
if(!is_boundary(i, j-1, k, surface_height)) {
temperature_gradient += (stemperature - temperature[make_idx(i, j-1, k)]) * make_float3(0.0f, -1.0f, 0.0f);
nval += 1;
}
temperature_gradient /= nval;
temperature_gradient *= 1.0f; // Scale the horizontal gradient
// Hot air floats, cold air falls
const float BUOYANCY_RATE = 0.2f;
//const uint seed = 1;
uint seed = (uint)( sim_time * 1000 / 127 );
// TODO: More humid air should be heavier
float convection_speed = BUOYANCY_RATE * (
stemperature - ambient_temperature
- (shumidity - ambient_humidity) * 0.2f
);
uint cell_hash = cudaNoise::calcPerm12(seed + i + cudaNoise::calcPerm(seed + j + cudaNoise::calcPerm(seed + k)));
next_velocity[make_idx(i, j, k)] = velocity[make_idx(i, j, k)]
+ make_float3(
abs(convection_speed) * temperature_gradient.x,
abs(convection_speed) * temperature_gradient.y,
convection_speed * 1.0f
);
}
/*
* This is exactly how clouds form and make rain. Water from rivers, lakes, streams, or oceans evaporates into the air when it is heated up by the sun. As the water vapor rises up in the air, it condenses, or starts to cool down and turns back into a liquid. Then, droplets of water start to stick together as clouds. When enough droplets stick together in the clouds, they become large and heavy and are pulled down towards the earth by the force of gravity. When water drops fall from clouds, it is called rain. Sometimes the droplets freeze before they get to the ground and become hail, sleet, or snow!
* Ref: https://learning-center.homesciencetools.com/article/clouds-and-rain/
*/
__global__ void water_cycle(float *humidity,
float *precipitation,
float *surface_height, float *temperature) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
uint idx = make_idx(i, j, k);
float sheight = make_height(k);
float stemperature = temperature[idx];
if( is_boundary(i, j, k, surface_height) )
return;
if(make_ground_height(i, j, surface_height) < %(sea_level)s) { // && sheight <= %(sea_level)s
// Absorb surface water
const float WATER_ABSORB_RATE = 1.0f;
humidity[idx] += WATER_ABSORB_RATE * (1.0f / pow(max((sheight - make_surf_height(i, j, surface_height)) * %(sim_h)s, 1.0f), 2.0f)) * timestep;
}
if(make_ground_height(i, j, surface_height) >= %(sea_level)s && is_surface(i, j, k, surface_height)) {
// Fresh water evaporation & evapotranspiration
const float WATER_ABSORB_RATE = 0.1f;
humidity[idx] += WATER_ABSORB_RATE * (1.0f / pow(max((sheight - make_surf_height(i, j, surface_height)) * %(sim_h)s, 1.0f), 2.0f)) * max(1.0f - sheight, 0.0f) * timestep;
}
float shumidity = humidity[idx];
// Condense excess vapor
float dew_point = stemperature / sheight * 0.5f;
if(shumidity > dew_point) {
humidity[idx] += (dew_point - shumidity) * timestep;
if(make_ground_height(i, j, surface_height) > %(sea_level)s)
precipitation[idx] += fabsf(dew_point - shumidity) * timestep;
}
}
/*
* Calculate divergence. `halfrdx` is 0.5 / gridscale
*/
__global__ void divergence(float halfrdx, float3 *w, float *d, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float3 w100 = w[make_idx(i-1, j, k)];
float3 w200 = w[make_idx(i+1, j, k)];
float3 w010 = w[make_idx(i, j-1, k)];
float3 w020 = w[make_idx(i, j+1, k)];
float3 w001 = w[make_idx(i, j, k-1)];
float3 w002 = w[make_idx(i, j, k+1)];
d[make_idx(i, j, k)] = halfrdx * ((w200.x - w100.x) + (w020.y - w010.y) + (w002.z - w001.z));
}
/*
* Subtract the pressure gradient from the intermediate velocity to enforce incompressibility
*/
__global__ void gradient(float halfrdx, float *p, float3 *w, float3 *u, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float p100 = p[make_idx(i-1, j, k)];
float p200 = p[make_idx(i+1, j, k)];
float p010 = p[make_idx(i, j-1, k)];
float p020 = p[make_idx(i, j+1, k)];
float p001 = p[make_idx(i, j, k-1)];
float p002 = p[make_idx(i, j, k+1)];
u[make_idx(i, j, k)] = w[make_idx(i, j, k)] - halfrdx * make_float3(p200-p100, p020-p010, p002-p001);
}
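/*
 * How these kernels are assumed to compose (a sketch of the usual
 * pressure-projection order, not a call sequence taken from this file):
 * advect_self / advect move the velocity and the advected fields, divergence
 * computes d from the intermediate velocity w, jacobi_f is iterated on the
 * pressure p with d as the right-hand side, and gradient then subtracts
 * halfrdx * grad(p) from w so that the resulting velocity u is approximately
 * divergence-free; boundary_f / boundary_f3 re-impose boundary values between
 * the steps.
 */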
/*
* Sets valid boundary values
*/
__global__ void boundary_f(float scale, float *x, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( !is_boundary(i, j, k, surface_height) )
return;
float val = 0.0f * x[make_idx(i, j, k)];
float nval = 1e-4;
if( i > 0 && !is_boundary(i-1, j, k, surface_height) ) {
val += x[make_idx(i-1, j, k)];
nval += 1;
}
if( i < %(sim_w)s-1 && !is_boundary(i+1, j, k, surface_height) ) {
val += x[make_idx(i+1, j, k)];
nval += 1;
}
if( j > 0 && !is_boundary(i, j-1, k, surface_height) ) {
val += x[make_idx(i, j-1, k)];
nval += 1;
}
if( j < %(sim_h)s-1 && !is_boundary(i, j+1, k, surface_height) ) {
val += x[make_idx(i, j+1, k)];
nval += 1;
}
if( k > 0 && !is_boundary(i, j, k-1, surface_height) ) {
val += x[make_idx(i, j, k-1)];
nval += 1;
}
if( k < %(sim_d)s-1 && !is_boundary(i, j, k+1, surface_height) ) {
val += x[make_idx(i, j, k+1)];
nval += 1;
}
x[make_idx(i, j, k)] = scale * val / nval;
}
/*
* Sets valid boundary values
*/
__global__ void boundary_f3(float scale, float3 *x, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( !is_boundary(i, j, k, surface_height) )
return;
float3 val = 0.0f * x[make_idx(i, j, k)];
float nval = 1e-4;
if( i > 0 && !is_boundary(i-1, j, k, surface_height) ) {
val += make_float3(scale * x[make_idx(i-1, j, k)].x, x[make_idx(i-1, j, k)].y, x[make_idx(i-1, j, k)].z); // x[make_idx(i-1, j, k)];
nval += 1;
}
if( i < %(sim_w)s-1 && !is_boundary(i+1, j, k, surface_height) ) {
val += make_float3(scale * x[make_idx(i+1, j, k)].x, x[make_idx(i+1, j, k)].y, x[make_idx(i+1, j, k)].z); // x[make_idx(i+1, j, k)];
nval += 1;
}
if( j > 0 && !is_boundary(i, j-1, k, surface_height) ) {
val += make_float3(x[make_idx(i, j-1, k)].x, scale * x[make_idx(i, j-1, k)].y, x[make_idx(i, j-1, k)].z); // x[make_idx(i, j-1, k)];
nval += 1;
}
if( j < %(sim_h)s-1 && !is_boundary(i, j+1, k, surface_height) ) {
val += make_float3(x[make_idx(i, j+1, k)].x, scale * x[make_idx(i, j+1, k)].y, x[make_idx(i, j+1, k)].z); // x[make_idx(i, j+1, k)];
nval += 1;
}
if( k > 0 && !is_boundary(i, j, k-1, surface_height) ) {
val += make_float3(x[make_idx(i, j, k-1)].x, x[make_idx(i, j, k-1)].y, scale * x[make_idx(i, j, k-1)].z); // x[make_idx(i, j, k-1)];
nval += 1;
}
if( k < %(sim_d)s-1 && !is_boundary(i, j, k+1, surface_height) ) {
val += make_float3(x[make_idx(i, j, k+1)].x, x[make_idx(i, j, k+1)].y, scale * x[make_idx(i, j, k+1)].z); // x[make_idx(i, j, k+1)];
nval += 1;
}
x[make_idx(i, j, k)] = val / nval;
}
}
|
ea148f9af02dae1f2b541128fbbbb66a0cd8f066.cu
|
#include "cuda/helper_math.h"
#include "cuda/cudaNoise/cudaNoise.cuh"
__device__ const float timestep = %(sim_timestep)s;
__device__ const float SIM_HEIGHT_ABOVE = 0.2f;
__device__ const float SIM_HEIGHT_BELOW = 0.1f;
/*
* 3D -> 1D index in contiguous array
*/
__device__ uint make_idx(uint i, uint j, uint k)
{
return i * %(sim_h)s * %(sim_d)s + j * %(sim_d)s + k;
}
/*
* 2D -> 1D index in contiguous array
*/
__device__ uint make_flat_idx(uint i, uint j)
{
return i * %(sim_h)s + j;
}
// Simulation space spans heights from -0.1 below the lowest point to +0.2 above the highest one
__device__ float make_height(uint k)
{
return float(k) / %(sim_d)s * (1.0f + SIM_HEIGHT_ABOVE + SIM_HEIGHT_BELOW) - SIM_HEIGHT_BELOW;
}
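/*
 * Illustrative only -- sim_d is a template parameter and the value below is an
 * assumption: with sim_d = 64 the formula height = k / sim_d * 1.3 - 0.1 maps
 * k = 0 to -0.1 (bottom of the buffer below the terrain), k = 32 to 0.55, and
 * k = 64 to 1.2 (top of the buffer above the terrain).
 */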
/*
* Returns surface (ground or water) height at the location
*/
__device__ float make_surf_height(uint i, uint j, float *surface_height)
{
return max(surface_height[make_flat_idx(i, j)], %(sea_level)s);
}
/*
* Returns ground height at the location
*/
__device__ float make_ground_height(uint i, uint j, float *surface_height)
{
return surface_height[make_flat_idx(i, j)];
}
/*
* Calculates real-scale distance between simulation cells
*/
__device__ float make_cell_dist(int si, int sj, int sk,
int ti, int tj, int tk)
{
return sqrtf(powf((ti-si) / %(sim_d_scale)s * %(cell_scale)s, 2) + powf((tj-sj) / %(sim_d_scale)s * %(cell_scale)s, 2) + powf((tk-sk) * %(cell_scale)s, 2));
}
/*
* Tells whether the cell is right at the surface (ground or water)
*/
__device__ bool is_surface(uint i, uint j, uint k, float *surface_height) {
if(make_height(k) < make_surf_height(i, j, surface_height)) {
if(
(i > 0 && make_height(k+1) >= make_surf_height(i-1, j, surface_height))
|| (i < %(sim_w)s-1 && make_height(k+1) >= make_surf_height(i+1, j, surface_height))
|| (j > 0 && make_height(k+1) >= make_surf_height(i, j-1, surface_height))
|| (j < %(sim_h)s-1 && make_height(k+1) >= make_surf_height(i, j+1, surface_height))
) {
return true;
}
}
return false;
}
/*
* Tells whether the cell is at the surface or above
*/
__device__ bool is_surface_or_above(uint i, uint j, uint k, float *surface_height) {
return is_surface(i, j, k, surface_height) || make_height(k) >= make_surf_height(i, j, surface_height);
}
/*
* Tells whether the cell is a boundary cell: on the domain edge, or below the surface without being a surface cell
*/
__device__ bool is_boundary(uint i, uint j, uint k, float *surface_height) {
if(min(i %% (%(sim_w)s-1), 1) + min(j %% (%(sim_h)s-1), 1) + min(k %% (%(sim_d)s-1), 1) < 3
|| (make_height(k) < make_surf_height(i, j, surface_height) && !is_surface(i, j, k, surface_height))) {
return true;
}
return false;
}
/*
* Returns the value trilinearly interpolated at real-valued simulation coordinates
*/
template<typename T>
__device__ T interp3(float i, float j, float k, T *f) {
float fraci = i - floorf(i);
float fracj = j - floorf(j);
float frack = k - floorf(k);
T f000 = f[make_idx( (int)floorf(i), (int)floorf(j), (int)floorf(k) )];
T f001 = f[make_idx( (int)floorf(i), (int)floorf(j), (int)ceilf(k) )];
T f010 = f[make_idx( (int)floorf(i), (int)ceilf(j), (int)floorf(k) )];
T f011 = f[make_idx( (int)floorf(i), (int)ceilf(j), (int)ceilf(k) )];
T f100 = f[make_idx( (int)ceilf(i), (int)floorf(j), (int)floorf(k) )];
T f101 = f[make_idx( (int)ceilf(i), (int)floorf(j), (int)ceilf(k) )];
T f110 = f[make_idx( (int)ceilf(i), (int)ceilf(j), (int)floorf(k) )];
T f111 = f[make_idx( (int)ceilf(i), (int)ceilf(j), (int)ceilf(k) )];
return lerp(
lerp(
lerp(f000, f001, frack),
lerp(f010, f011, frack),
fracj
),
lerp(
lerp(f100, f101, frack),
lerp(f110, f111, frack),
fracj
),
fraci
);
}
/*
* Jacobi iteration method for solving Poisson equations
*/
template<typename T>
__device__ void jacobi(float alpha, float rBeta, T *x, T *next_x, T *b, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
T x001 = x[make_idx(i, j, k-1)];
T x002 = x[make_idx(i, j, k+1)];
T x010 = x[make_idx(i, j-1, k)];
T x020 = x[make_idx(i, j+1, k)];
T x100 = x[make_idx(i-1, j, k)];
T x200 = x[make_idx(i+1, j, k)];
T v = b[make_idx(i, j, k)];
T r = (x001 + x002 + x010 + x020 + x100 + x200 + alpha * v) * rBeta;
next_x[make_idx(i, j, k)] = r;
}
/*
* Jacobi iteration method for solving Poisson equations, when the unknown is the air velocity field itself
*/
template<typename T>
__device__ void jacobi_auto(float alpha, float rBeta, T *x, T *next_x, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
T x001 = x[make_idx(i, j, k-1)];
T x002 = x[make_idx(i, j, k+1)];
T x010 = x[make_idx(i, j-1, k)];
T x020 = x[make_idx(i, j+1, k)];
T x100 = x[make_idx(i-1, j, k)];
T x200 = x[make_idx(i+1, j, k)];
T v = x[make_idx(i, j, k)];
T r = (x001 + x002 + x010 + x020 + x100 + x200 + alpha * v) * rBeta;
next_x[make_idx(i, j, k)] = r;
}
extern "C" {
/*
* Zeroes out the tensor
*/
__global__ void zero3d(float *x) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
x[make_idx(i, j, k)] = 0.0f;
}
__global__ void copy3d(float *from, float *to) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
to[make_idx(i, j, k)] = from[make_idx(i, j, k)];
}
/*
* Transport a matter attribute using the velocity field
*/
__global__ void advect(float rdx, float3 *velocity, float *m, float *next_m, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float3 v = velocity[make_idx(i, j, k)];
float3 p = make_float3(float(i), float(j), float(k));
p -= v * rdx * timestep;
// Clamp coords
p.x = fminf(fmaxf(p.x, 0.0f), %(sim_w)s-1.0f);
p.y = fminf(fmaxf(p.y, 0.0f), %(sim_h)s-1.0f);
p.z = fminf(fmaxf(p.z, 0.0f), %(sim_d)s-1.0f);
next_m[make_idx(i, j, k)] = interp3(p.x, p.y, p.z, m);
}
/*
* Transport the velocity field itself
*/
__global__ void advect_self(float rdx, float3 *velocity, float3 *next_m, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float3 v = velocity[make_idx(i, j, k)];
float3 p = make_float3(float(i), float(j), float(k));
p -= v * rdx * timestep;
// Clamp coords
p.x = fminf(fmaxf(p.x, 0.0f), %(sim_w)s-1.0f);
p.y = fminf(fmaxf(p.y, 0.0f), %(sim_h)s-1.0f);
p.z = fminf(fmaxf(p.z, 0.0f), %(sim_d)s-1.0f);
next_m[make_idx(i, j, k)] = interp3(p.x, p.y, p.z, velocity);
}
__global__ void jacobi_f(float alpha, float rBeta, float *x, float *next_x, float *b, float *surface_height) {
jacobi(alpha, rBeta, x, next_x, b, surface_height);
}
__global__ void jacobi_f3(float alpha, float rBeta, float3 *x, float3 *next_x, float *surface_height) {
jacobi_auto(alpha, rBeta, x, next_x, surface_height);
}
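/*
 * Usage sketch (an assumption about the host-side driver, not code taken from
 * this file): the Jacobi kernels are intended to be launched repeatedly while
 * ping-ponging two buffers, e.g. for a pressure solve with the right-hand side
 * d produced by the divergence kernel below:
 *
 * alpha = -1.0 / (rdx * rdx), rBeta = 1.0 / 6.0 (illustrative coefficients)
 * repeat roughly 20-40 times:
 * jacobi_f(alpha, rBeta, p, p_next, d, surface_height) over the whole grid
 * boundary_f(1.0, p_next, surface_height)
 * swap(p, p_next)
 */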
/*
* Sun heating the ground, air cooling with ascent, heat outflux into outer space
*/
__global__ void temperature_flux(float *temperature, float *next_temperature, float3 *velocity,
float *surface_height, float sim_time, float *humidity,
float *precipitation, float *last_precipitation) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float sheight = make_height(k);
float flux = 0.0f;
// Different kinds of temperature dynamics
//float time_flux = (sin(sim_time * 0.05f + float(i) * 0.01f) + 1.0f) / 2.0f;
float time_flux = 1.0f; //max(sin(sim_time * 0.03f + float(i) * 0.02f), 0.0f); // + float(j) * 0.008f
//time_flux = (sin(sim_time * 0.1f) + 1.0f) / 2.0f;
time_flux = (0.1f + 0.9f * (float(j) / %(sim_h)s)) // Sphericity - poles are colder
* (sin(sim_time * 0.1f + float(i) * 0.02f) + 1.0f) / 2.0f; // Day-night cycles
if(make_ground_height(i, j, surface_height) >= %(sea_level)s) {
// How much rain in this column?
float col_precipitation = 0.0f;
for(int l = 0; l < %(sim_d)s; l++) {
col_precipitation += precipitation[make_idx(i, j, l)] - last_precipitation[make_idx(i, j, l)];
}
// Sun heating the ground
const float SUN_HEAT = 0.5f;
flux += SUN_HEAT * (1.0f / pow(max((sheight - make_surf_height(i, j, surface_height)) * %(sim_h)s, 1.0f), 2.0f)) * timestep * time_flux;
flux -= 0.1f * col_precipitation * timestep * time_flux;
} else if(sheight < make_surf_height(i, j, surface_height) && sheight <= %(sea_level)s) {
// How much rain in this column?
float col_precipitation = 0.0f;
for(int l = 0; l < %(sim_d)s; l++) {
col_precipitation += precipitation[make_idx(i, j, l)] - last_precipitation[make_idx(i, j, l)];
}
// Sun heating the water
const float SUN_WATER_HEAT = 0.05f;
flux += SUN_WATER_HEAT * (1.0f / pow(max((sheight - make_surf_height(i, j, surface_height)) * %(sim_h)s, 1.0f), 2.0f)) * timestep * time_flux;
//flux -= 0.1f * col_precipitation * timestep * time_flux;
}
// Cooling with ascent / heating with descent
if(velocity[make_idx(i, j, k)].z >= 0) {
const float ASCENT_COOL_RATE = 0.02f;
flux += temperature[make_idx(i, j, k)] * -velocity[make_idx(i, j, k)].z * ASCENT_COOL_RATE * timestep;
} else {
const float DESCENT_HEAT_RATE = 0.015f;
flux += temperature[make_idx(i, j, k)] * velocity[make_idx(i, j, k)].z * DESCENT_HEAT_RATE * timestep;
}
if( k == %(sim_d)s-2 ) {
// Cool air in the top cells
const float OUTFLUX_RATE = 0.005f;
flux -= temperature[make_idx(i, j, k)] * OUTFLUX_RATE * timestep;
}
next_temperature[make_idx(i, j, k)] = temperature[make_idx(i, j, k)] + flux;
}
/*
* Hot air floats, cold air falls
*/
__global__ void convection(float3 *velocity, float3 *next_velocity,
float *surface_height,
float *temperature, float ambient_temperature,
float *humidity, float ambient_humidity, float sim_time) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float stemperature = temperature[make_idx(i, j, k)];
float shumidity = humidity[make_idx(i, j, k)];
float nval = 1e-4;
float3 temperature_gradient = make_float3(0.0f, 0.0f, 0.0f);
if(!is_boundary(i+1, j, k, surface_height)) {
temperature_gradient += (stemperature - temperature[make_idx(i+1, j, k)]) * make_float3(1.0f, 0.0f, 0.0f);
nval += 1;
}
if(!is_boundary(i-1, j, k, surface_height)) {
temperature_gradient += (stemperature - temperature[make_idx(i-1, j, k)]) * make_float3(-1.0f, 0.0f, 0.0f);
nval += 1;
}
if(!is_boundary(i, j+1, k, surface_height)) {
temperature_gradient += (stemperature - temperature[make_idx(i, j+1, k)]) * make_float3(0.0f, 1.0f, 0.0f);
nval += 1;
}
if(!is_boundary(i, j-1, k, surface_height)) {
temperature_gradient += (stemperature - temperature[make_idx(i, j-1, k)]) * make_float3(0.0f, -1.0f, 0.0f);
nval += 1;
}
temperature_gradient /= nval;
temperature_gradient *= 1.0f; // Scale the horizontal gradient
// Hot air floats, cold air falls
const float BUOYANCY_RATE = 0.2f;
//const uint seed = 1;
uint seed = (uint)( sim_time * 1000 / 127 );
// TODO: More humid air should be heavier
float convection_speed = BUOYANCY_RATE * (
stemperature - ambient_temperature
- (shumidity - ambient_humidity) * 0.2f
);
uint cell_hash = cudaNoise::calcPerm12(seed + i + cudaNoise::calcPerm(seed + j + cudaNoise::calcPerm(seed + k)));
next_velocity[make_idx(i, j, k)] = velocity[make_idx(i, j, k)]
+ make_float3(
abs(convection_speed) * temperature_gradient.x,
abs(convection_speed) * temperature_gradient.y,
convection_speed * 1.0f
);
}
/*
* This is exactly how clouds form and make rain. Water from rivers, lakes, streams, or oceans evaporates into the air when it is heated up by the sun. As the water vapor rises up in the air, it condenses, or starts to cool down and turns back into a liquid. Then, droplets of water start to stick together as clouds. When enough droplets stick together in the clouds, they become large and heavy and are pulled down towards the earth by the force of gravity. When water drops fall from clouds, it is called rain. Sometimes the droplets freeze before they get to the ground and become hail, sleet, or snow!
* Ref: https://learning-center.homesciencetools.com/article/clouds-and-rain/
*/
__global__ void water_cycle(float *humidity,
float *precipitation,
float *surface_height, float *temperature) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
uint idx = make_idx(i, j, k);
float sheight = make_height(k);
float stemperature = temperature[idx];
if( is_boundary(i, j, k, surface_height) )
return;
if(make_ground_height(i, j, surface_height) < %(sea_level)s) { // && sheight <= %(sea_level)s
// Absorb surface water
const float WATER_ABSORB_RATE = 1.0f;
humidity[idx] += WATER_ABSORB_RATE * (1.0f / pow(max((sheight - make_surf_height(i, j, surface_height)) * %(sim_h)s, 1.0f), 2.0f)) * timestep;
}
if(make_ground_height(i, j, surface_height) >= %(sea_level)s && is_surface(i, j, k, surface_height)) {
// Fresh water evaporation & evapotranspiration
const float WATER_ABSORB_RATE = 0.1f;
humidity[idx] += WATER_ABSORB_RATE * (1.0f / pow(max((sheight - make_surf_height(i, j, surface_height)) * %(sim_h)s, 1.0f), 2.0f)) * max(1.0f - sheight, 0.0f) * timestep;
}
float shumidity = humidity[idx];
// Condense excess vapor
float dew_point = stemperature / sheight * 0.5f;
if(shumidity > dew_point) {
humidity[idx] += (dew_point - shumidity) * timestep;
if(make_ground_height(i, j, surface_height) > %(sea_level)s)
precipitation[idx] += fabsf(dew_point - shumidity) * timestep;
}
}
/*
* Calculate divergence. `halfrdx` is 0.5 / gridscale
*/
__global__ void divergence(float halfrdx, float3 *w, float *d, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float3 w100 = w[make_idx(i-1, j, k)];
float3 w200 = w[make_idx(i+1, j, k)];
float3 w010 = w[make_idx(i, j-1, k)];
float3 w020 = w[make_idx(i, j+1, k)];
float3 w001 = w[make_idx(i, j, k-1)];
float3 w002 = w[make_idx(i, j, k+1)];
d[make_idx(i, j, k)] = halfrdx * ((w200.x - w100.x) + (w020.y - w010.y) + (w002.z - w001.z));
}
/*
* Subtract the pressure gradient from the intermediate velocity to enforce incompressibility
*/
__global__ void gradient(float halfrdx, float *p, float3 *w, float3 *u, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( is_boundary(i, j, k, surface_height) )
return;
float p100 = p[make_idx(i-1, j, k)];
float p200 = p[make_idx(i+1, j, k)];
float p010 = p[make_idx(i, j-1, k)];
float p020 = p[make_idx(i, j+1, k)];
float p001 = p[make_idx(i, j, k-1)];
float p002 = p[make_idx(i, j, k+1)];
u[make_idx(i, j, k)] = w[make_idx(i, j, k)] - halfrdx * make_float3(p200-p100, p020-p010, p002-p001);
}
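/*
 * How these kernels are assumed to compose (a sketch of the usual
 * pressure-projection order, not a call sequence taken from this file):
 * advect_self / advect move the velocity and the advected fields, divergence
 * computes d from the intermediate velocity w, jacobi_f is iterated on the
 * pressure p with d as the right-hand side, and gradient then subtracts
 * halfrdx * grad(p) from w so that the resulting velocity u is approximately
 * divergence-free; boundary_f / boundary_f3 re-impose boundary values between
 * the steps.
 */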
/*
* Sets valid boundary values
*/
__global__ void boundary_f(float scale, float *x, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( !is_boundary(i, j, k, surface_height) )
return;
float val = 0.0f * x[make_idx(i, j, k)];
float nval = 1e-4;
if( i > 0 && !is_boundary(i-1, j, k, surface_height) ) {
val += x[make_idx(i-1, j, k)];
nval += 1;
}
if( i < %(sim_w)s-1 && !is_boundary(i+1, j, k, surface_height) ) {
val += x[make_idx(i+1, j, k)];
nval += 1;
}
if( j > 0 && !is_boundary(i, j-1, k, surface_height) ) {
val += x[make_idx(i, j-1, k)];
nval += 1;
}
if( j < %(sim_h)s-1 && !is_boundary(i, j+1, k, surface_height) ) {
val += x[make_idx(i, j+1, k)];
nval += 1;
}
if( k > 0 && !is_boundary(i, j, k-1, surface_height) ) {
val += x[make_idx(i, j, k-1)];
nval += 1;
}
if( k < %(sim_d)s-1 && !is_boundary(i, j, k+1, surface_height) ) {
val += x[make_idx(i, j, k+1)];
nval += 1;
}
x[make_idx(i, j, k)] = scale * val / nval;
}
/*
* Sets valid boundary values
*/
__global__ void boundary_f3(float scale, float3 *x, float *surface_height) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if( !is_boundary(i, j, k, surface_height) )
return;
float3 val = 0.0f * x[make_idx(i, j, k)];
float nval = 1e-4;
if( i > 0 && !is_boundary(i-1, j, k, surface_height) ) {
val += make_float3(scale * x[make_idx(i-1, j, k)].x, x[make_idx(i-1, j, k)].y, x[make_idx(i-1, j, k)].z); // x[make_idx(i-1, j, k)];
nval += 1;
}
if( i < %(sim_w)s-1 && !is_boundary(i+1, j, k, surface_height) ) {
val += make_float3(scale * x[make_idx(i+1, j, k)].x, x[make_idx(i+1, j, k)].y, x[make_idx(i+1, j, k)].z); // x[make_idx(i+1, j, k)];
nval += 1;
}
if( j > 0 && !is_boundary(i, j-1, k, surface_height) ) {
val += make_float3(x[make_idx(i, j-1, k)].x, scale * x[make_idx(i, j-1, k)].y, x[make_idx(i, j-1, k)].z); // x[make_idx(i, j-1, k)];
nval += 1;
}
if( j < %(sim_h)s-1 && !is_boundary(i, j+1, k, surface_height) ) {
val += make_float3(x[make_idx(i, j+1, k)].x, scale * x[make_idx(i, j+1, k)].y, x[make_idx(i, j+1, k)].z); // x[make_idx(i, j+1, k)];
nval += 1;
}
if( k > 0 && !is_boundary(i, j, k-1, surface_height) ) {
val += make_float3(x[make_idx(i, j, k-1)].x, x[make_idx(i, j, k-1)].y, scale * x[make_idx(i, j, k-1)].z); // x[make_idx(i, j, k-1)];
nval += 1;
}
if( k < %(sim_d)s-1 && !is_boundary(i, j, k+1, surface_height) ) {
val += make_float3(x[make_idx(i, j, k+1)].x, x[make_idx(i, j, k+1)].y, scale * x[make_idx(i, j, k+1)].z); // x[make_idx(i, j, k+1)];
nval += 1;
}
x[make_idx(i, j, k)] = val / nval;
}
}
|
cb63a5462c11dd5153ff52dbfa0b1e1d9faa8cbd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_inhibition_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxInhibitionLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
// if (has_ignore_label_ && label_value == ignore_label_) {
// loss[index] = 0;
// counts[index] = 0;
// } else {
// loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
// Dtype(FLT_MIN)));
// counts[index] = 1;
// }
loss[index] = 0;
for (int i = 0; i < dim / spatial_dim; ++i)
{
loss[index] += -log(max(prob_data[n * dim + i * spatial_dim + s],
Dtype(FLT_MIN)))/(dim / spatial_dim);
}
counts[index] = 1;
}
}
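// In effect, for each sample n and spatial position s the kernel above computes
// the cross-entropy against a uniform target over the C = dim / spatial_dim
// channels, i.e. loss = -(1/C) * sum_c log(p_c), independent of the label value.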
template <typename Dtype>
void SoftmaxInhibitionLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxInhibitionLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() >= 2) {
top[1]->ShareData(prob_);
}
if (top.size() >= 3) {
// Output per-instance loss
caffe_gpu_memcpy(top[2]->count() * sizeof(Dtype), loss_data, top[2]->mutable_gpu_data());
}
// Fix a bug that occurs when propagate_down[0] = false in the backward pass
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}
template <typename Dtype>
__global__ void SoftmaxInhibitionLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
// if (has_ignore_label_ && label_value == ignore_label_) {
// for (int c = 0; c < channels; ++c) {
// bottom_diff[n * dim + c * spatial_dim + s] = 0;
// }
// counts[index] = 0;
// } else {
// bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
// counts[index] = 1;
// }
for (int i = 0; i < dim / spatial_dim; ++i)
{
// subtract the uniform target probability 1/C from every channel;
// floating-point division keeps the update from truncating to zero
bottom_diff[n * dim + i * spatial_dim + s] -= Dtype(1) / channels;
}
counts[index] = 1;
}
}
template <typename Dtype>
void SoftmaxInhibitionLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxInhibitionLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxInhibitionLossLayer);
} // namespace caffe
|
cb63a5462c11dd5153ff52dbfa0b1e1d9faa8cbd.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_inhibition_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxInhibitionLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
// if (has_ignore_label_ && label_value == ignore_label_) {
// loss[index] = 0;
// counts[index] = 0;
// } else {
// loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
// Dtype(FLT_MIN)));
// counts[index] = 1;
// }
loss[index] = 0;
for (int i = 0; i < dim / spatial_dim; ++i)
{
loss[index] += -log(max(prob_data[n * dim + i * spatial_dim + s],
Dtype(FLT_MIN)))/(dim / spatial_dim);
}
counts[index] = 1;
}
}
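// In effect, for each sample n and spatial position s the kernel above computes
// the cross-entropy against a uniform target over the C = dim / spatial_dim
// channels, i.e. loss = -(1/C) * sum_c log(p_c), independent of the label value.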
template <typename Dtype>
void SoftmaxInhibitionLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxInhibitionLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() >= 2) {
top[1]->ShareData(prob_);
}
if (top.size() >= 3) {
// Output per-instance loss
caffe_gpu_memcpy(top[2]->count() * sizeof(Dtype), loss_data, top[2]->mutable_gpu_data());
}
// Fix a bug that occurs when propagate_down[0] = false in the backward pass
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}
template <typename Dtype>
__global__ void SoftmaxInhibitionLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
// if (has_ignore_label_ && label_value == ignore_label_) {
// for (int c = 0; c < channels; ++c) {
// bottom_diff[n * dim + c * spatial_dim + s] = 0;
// }
// counts[index] = 0;
// } else {
// bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
// counts[index] = 1;
// }
for (int i = 0; i < dim / spatial_dim; ++i)
{
// subtract the uniform target probability 1/C from every channel;
// floating-point division keeps the update from truncating to zero
bottom_diff[n * dim + i * spatial_dim + s] -= Dtype(1) / channels;
}
counts[index] = 1;
}
}
template <typename Dtype>
void SoftmaxInhibitionLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxInhibitionLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxInhibitionLossLayer);
} // namespace caffe
|
9197c2e1e45131694db28db58fbc64604e6741cd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "criterion/backend/cuda/kernels/FullConnectionCriterion.cuh"
#include <cassert>
#include <cfloat>
#include <cmath>
#include <hipcub/hipcub.hpp>
namespace {
constexpr int kBlockSize = 128;
__global__ void forwardStep(
int N,
const double* fccacc_tp,
double* fccacc_t,
const float* input_t,
const float* trans,
double* transtmp) {
using BlockReduce = hipcub::BlockReduce<double, kBlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
__shared__ double maxValue;
assert(blockDim.x == kBlockSize);
double threadMax = -DBL_MAX;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
transtmp[k] = trans[k % (N * N)] + fccacc_tp[k / (N * N) * N + i];
threadMax = (transtmp[k] > threadMax) ? transtmp[k] : threadMax;
}
double maxResult = BlockReduce(tempStorage).Reduce(threadMax, hipcub::Max());
if (threadIdx.x == 0) {
maxValue = maxResult;
}
__syncthreads();
double threadSum = 0;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
threadSum += exp(transtmp[k] - maxValue);
}
double sumResult = BlockReduce(tempStorage).Sum(threadSum);
if (threadIdx.x == 0) {
fccacc_t[blockIdx.x] = log(sumResult) + maxValue + input_t[blockIdx.x];
}
}
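// forwardStep's two block-wide reductions implement the numerically stable
// log-sum-exp: log(sum_i exp(x_i)) = m + log(sum_i exp(x_i - m)) with
// m = max_i x_i, so the max is reduced first and the exponentials cannot overflow.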
__global__ void backwardStep1(
int N,
const double* fccacc_tp,
const double* fccgacc_t,
const float* trans,
double* transtmp,
double* gtrans) {
using BlockReduce = hipcub::BlockReduce<double, kBlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
__shared__ double maxValue;
__shared__ double sumValue;
assert(blockDim.x == kBlockSize);
double threadMax = -DBL_MAX;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
transtmp[k] = trans[k % (N * N)] + fccacc_tp[k / (N * N) * N + i];
threadMax = (transtmp[k] > threadMax) ? transtmp[k] : threadMax;
}
double maxResult = BlockReduce(tempStorage).Reduce(threadMax, hipcub::Max());
if (threadIdx.x == 0) {
maxValue = maxResult;
}
__syncthreads();
double threadSum = 0;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
transtmp[k] = exp(transtmp[k] - maxValue);
threadSum += transtmp[k];
}
double sumResult = BlockReduce(tempStorage).Sum(threadSum);
if (threadIdx.x == 0) {
sumValue = sumResult;
}
__syncthreads();
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
transtmp[k] = transtmp[k] / sumValue * fccgacc_t[blockIdx.x];
gtrans[k] += transtmp[k];
}
}
// sums along dim 1 instead of 0
__global__ void
computeSums1(int N0, int N1, const double* transtmp, double* sums) {
using BlockReduce = hipcub::BlockReduce<double, kBlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
assert(blockDim.x == kBlockSize);
double threadSum = 0;
for (int i = threadIdx.x; i < N1; i += blockDim.x) {
threadSum +=
transtmp[(blockIdx.x / N0) * N0 * N1 + i * N0 + (blockIdx.x % N0)];
}
double result = BlockReduce(tempStorage).Sum(threadSum);
if (threadIdx.x == 0) {
sums[blockIdx.x] = result;
}
}
} // namespace
namespace w2l {
namespace cuda {
/**
* original arrayfire code for FCC forward:
*
* for (int t = 1; t < T; t++) {
* const auto& fccacc_tp = fccacc(span, span, t - 1); // [N, B, 1]
* const auto& transtmp = tile(trans, 1, 1, B) +
* tile(moddims(fccacc_tp, N, 1, B), 1, N); // [N, N, B]
* const auto& maxes = max(transtmp, 0); // [1, N, B]
* const auto& lse =
* maxes + log(sum(exp(transtmp - tile(maxes, N)), 0)); // [1, N, B]
* fccacc(span, span, t) = moddims(lse, N, B, 1) + inp(span, span, t);
* }
*/
int fullConnectionCriterionForward(
int T,
int B,
int N,
const float* input,
const float* trans,
double* fccacc,
hipStream_t stream) {
int retval = 0;
double* transtmp;
if ((retval = hipMalloc(&transtmp, B * N * N * sizeof(double)))) {
goto err_1;
}
for (int t = 1; t < T; ++t) {
hipLaunchKernelGGL(( forwardStep), dim3(B * N), dim3(kBlockSize), 0, stream,
N,
fccacc + (t - 1) * B * N,
fccacc + t * B * N,
input + t * B * N,
trans,
transtmp);
}
hipFree(transtmp);
err_1:
return retval;
}
/**
* original arrayfire code for FCC backward:
*
* for (int t = T - 1; t > 0; t--) {
* const auto& fccacc_tp = fccacc(span, span, t - 1); // [N, B]
* const auto& transtmp = tile(trans, 1, 1, B) +
* tile(moddims(fccacc_tp, N, 1, B), 1, N); // [N, N, B]
* const auto& maxes = max(transtmp, 0); // [1, N, B]
* const auto& exps = exp(transtmp - tile(maxes, N)); // [N, N, B]
* const auto& dlse = exps / tile(sum(exps, 0), N); // [N, N, B]
*
* const auto& delta = dlse *
* tile(moddims(fccgacc(span, span, t), 1, N, B), N); // [N, N, B]
* fccgacc(span, span, t - 1) = moddims(sum(delta, 1), N, B);
* gtrans += sum(delta * tile(moddims(gscale, 1, 1, B), N, N), 2);
* }
*/
int fullConnectionCriterionBackward(
int T,
int B,
int N,
const float* trans,
const double* fccacc,
double* fccgacc,
double* gtrans,
hipStream_t stream) {
int retval = 0;
double* transtmp;
if ((retval = hipMalloc(&transtmp, B * N * N * sizeof(double)))) {
goto err_1;
}
for (int t = T - 1; t > 0; --t) {
    hipLaunchKernelGGL(backwardStep1, dim3(B * N), dim3(kBlockSize), 0, stream,
N,
fccacc + (t - 1) * B * N,
fccgacc + t * B * N,
trans,
transtmp,
gtrans);
    hipLaunchKernelGGL(computeSums1, dim3(B * N), dim3(kBlockSize), 0, stream,
N, N, transtmp, fccgacc + (t - 1) * B * N);
}
hipFree(transtmp);
err_1:
return retval;
}
} // namespace cuda
} // namespace w2l
|
9197c2e1e45131694db28db58fbc64604e6741cd.cu
|
#include "criterion/backend/cuda/kernels/FullConnectionCriterion.cuh"
#include <cassert>
#include <cfloat>
#include <cmath>
#include <cub/cub.cuh>
namespace {
constexpr int kBlockSize = 128;
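// every kernel in this file is launched with B * N blocks of kBlockSize threads:
// one block per output element, each reducing over N inputs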
__global__ void forwardStep(
int N,
const double* fccacc_tp,
double* fccacc_t,
const float* input_t,
const float* trans,
double* transtmp) {
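  // one block per (b, n) with blockIdx.x = b * N + n; computes
  //   fccacc_t[b*N + n] = input_t[b*N + n] + logsumexp_i(trans[n*N + i] + fccacc_tp[b*N + i])
  // using the usual max-subtraction for numerical stability; transtmp holds the
  // trans + fccacc_tp terms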
using BlockReduce = cub::BlockReduce<double, kBlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
__shared__ double maxValue;
assert(blockDim.x == kBlockSize);
double threadMax = -DBL_MAX;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
transtmp[k] = trans[k % (N * N)] + fccacc_tp[k / (N * N) * N + i];
threadMax = (transtmp[k] > threadMax) ? transtmp[k] : threadMax;
}
double maxResult = BlockReduce(tempStorage).Reduce(threadMax, cub::Max());
if (threadIdx.x == 0) {
maxValue = maxResult;
}
__syncthreads();
double threadSum = 0;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
threadSum += exp(transtmp[k] - maxValue);
}
double sumResult = BlockReduce(tempStorage).Sum(threadSum);
if (threadIdx.x == 0) {
fccacc_t[blockIdx.x] = log(sumResult) + maxValue + input_t[blockIdx.x];
}
}
__global__ void backwardStep1(
int N,
const double* fccacc_tp,
const double* fccgacc_t,
const float* trans,
double* transtmp,
double* gtrans) {
using BlockReduce = cub::BlockReduce<double, kBlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
__shared__ double maxValue;
__shared__ double sumValue;
assert(blockDim.x == kBlockSize);
double threadMax = -DBL_MAX;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
transtmp[k] = trans[k % (N * N)] + fccacc_tp[k / (N * N) * N + i];
threadMax = (transtmp[k] > threadMax) ? transtmp[k] : threadMax;
}
double maxResult = BlockReduce(tempStorage).Reduce(threadMax, cub::Max());
if (threadIdx.x == 0) {
maxValue = maxResult;
}
__syncthreads();
double threadSum = 0;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
transtmp[k] = exp(transtmp[k] - maxValue);
threadSum += transtmp[k];
}
double sumResult = BlockReduce(tempStorage).Sum(threadSum);
if (threadIdx.x == 0) {
sumValue = sumResult;
}
__syncthreads();
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
transtmp[k] = transtmp[k] / sumValue * fccgacc_t[blockIdx.x];
gtrans[k] += transtmp[k];
}
}
// sums along dim 1 instead of 0
__global__ void
computeSums1(int N0, int N1, const double* transtmp, double* sums) {
using BlockReduce = cub::BlockReduce<double, kBlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
assert(blockDim.x == kBlockSize);
double threadSum = 0;
for (int i = threadIdx.x; i < N1; i += blockDim.x) {
threadSum +=
transtmp[(blockIdx.x / N0) * N0 * N1 + i * N0 + (blockIdx.x % N0)];
}
double result = BlockReduce(tempStorage).Sum(threadSum);
if (threadIdx.x == 0) {
sums[blockIdx.x] = result;
}
}
} // namespace
namespace w2l {
namespace cuda {
/**
* original arrayfire code for FCC forward:
*
* for (int t = 1; t < T; t++) {
* const auto& fccacc_tp = fccacc(span, span, t - 1); // [N, B, 1]
* const auto& transtmp = tile(trans, 1, 1, B) +
* tile(moddims(fccacc_tp, N, 1, B), 1, N); // [N, N, B]
* const auto& maxes = max(transtmp, 0); // [1, N, B]
* const auto& lse =
* maxes + log(sum(exp(transtmp - tile(maxes, N)), 0)); // [1, N, B]
* fccacc(span, span, t) = moddims(lse, N, B, 1) + inp(span, span, t);
* }
*/
int fullConnectionCriterionForward(
int T,
int B,
int N,
const float* input,
const float* trans,
double* fccacc,
cudaStream_t stream) {
int retval = 0;
double* transtmp;
if ((retval = cudaMalloc(&transtmp, B * N * N * sizeof(double)))) {
goto err_1;
}
for (int t = 1; t < T; ++t) {
forwardStep<<<B * N, kBlockSize, 0, stream>>>(
N,
fccacc + (t - 1) * B * N,
fccacc + t * B * N,
input + t * B * N,
trans,
transtmp);
}
cudaFree(transtmp);
err_1:
return retval;
}
/**
* original arrayfire code for FCC backward:
*
* for (int t = T - 1; t > 0; t--) {
* const auto& fccacc_tp = fccacc(span, span, t - 1); // [N, B]
* const auto& transtmp = tile(trans, 1, 1, B) +
* tile(moddims(fccacc_tp, N, 1, B), 1, N); // [N, N, B]
* const auto& maxes = max(transtmp, 0); // [1, N, B]
* const auto& exps = exp(transtmp - tile(maxes, N)); // [N, N, B]
* const auto& dlse = exps / tile(sum(exps, 0), N); // [N, N, B]
*
* const auto& delta = dlse *
* tile(moddims(fccgacc(span, span, t), 1, N, B), N); // [N, N, B]
* fccgacc(span, span, t - 1) = moddims(sum(delta, 1), N, B);
* gtrans += sum(delta * tile(moddims(gscale, 1, 1, B), N, N), 2);
* }
*/
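// backwardStep1 forms the softmax weights (dlse) for each (batch, state) block,
// scales them by the incoming gradient fccgacc(t), and writes the result into both
// transtmp and (accumulated) gtrans; computeSums1 then reduces transtmp along dim 1
// to produce fccgacc(t - 1).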
int fullConnectionCriterionBackward(
int T,
int B,
int N,
const float* trans,
const double* fccacc,
double* fccgacc,
double* gtrans,
cudaStream_t stream) {
int retval = 0;
double* transtmp;
if ((retval = cudaMalloc(&transtmp, B * N * N * sizeof(double)))) {
goto err_1;
}
for (int t = T - 1; t > 0; --t) {
backwardStep1<<<B * N, kBlockSize, 0, stream>>>(
N,
fccacc + (t - 1) * B * N,
fccgacc + t * B * N,
trans,
transtmp,
gtrans);
computeSums1<<<B * N, kBlockSize, 0, stream>>>(
N, N, transtmp, fccgacc + (t - 1) * B * N);
}
cudaFree(transtmp);
err_1:
return retval;
}
} // namespace cuda
} // namespace w2l
|
41dca0f68b252ba3ccd4ee4e7f84dcaffe982ddc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "KerSortDataParticles.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned n = XSIZE*YSIZE;
unsigned pini = 1;
const unsigned *sortpart = NULL;
hipMalloc(&sortpart, XSIZE*YSIZE);
const float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
float *a2 = NULL;
hipMalloc(&a2, XSIZE*YSIZE);
float *b2 = NULL;
hipMalloc(&b2, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(KerSortDataParticles, dim3(gridBlock), dim3(threadBlock), 0, 0, n, pini, sortpart, a, b, a2, b2);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(KerSortDataParticles, dim3(gridBlock), dim3(threadBlock), 0, 0, n, pini, sortpart, a, b, a2, b2);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(KerSortDataParticles, dim3(gridBlock), dim3(threadBlock), 0, 0, n, pini, sortpart, a, b, a2, b2);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
41dca0f68b252ba3ccd4ee4e7f84dcaffe982ddc.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "KerSortDataParticles.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned n = XSIZE*YSIZE;
unsigned pini = 1;
const unsigned *sortpart = NULL;
cudaMalloc(&sortpart, XSIZE*YSIZE);
const float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
float *a2 = NULL;
cudaMalloc(&a2, XSIZE*YSIZE);
float *b2 = NULL;
cudaMalloc(&b2, XSIZE*YSIZE);
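// round the launch dimensions up so the grid fully covers an XSIZE x YSIZE array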
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
KerSortDataParticles<<<gridBlock,threadBlock>>>(n,pini,sortpart,a,b,a2,b2);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
KerSortDataParticles<<<gridBlock,threadBlock>>>(n,pini,sortpart,a,b,a2,b2);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
KerSortDataParticles<<<gridBlock,threadBlock>>>(n,pini,sortpart,a,b,a2,b2);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
72c7a880f7f7c2be2b94700a0e6504580b60fccb.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* (C) Copyright 1996-2016 ECMWF.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
* In applying this licence, ECMWF does not waive the privileges and immunities
* granted to it by virtue of its status as an intergovernmental organisation nor
* does it submit to any jurisdiction.
*/
#include <hip/hip_runtime.h>
#include "tests/AtlasTestEnvironment.h"
#include "atlas/array/Vector.h"
#include "atlas/array/gridtools/GPUClonable.h"
#include "atlas/array.h"
#include "atlas/array/MakeView.h"
#include "atlas/runtime/Log.h"
using namespace atlas::array;
namespace atlas {
namespace test {
struct int_gpu {
int_gpu() = delete;
int_gpu(int val) : val_(val), gpu_clone_(this) {}
int_gpu* gpu_object_ptr() {return gpu_clone_.gpu_object_ptr();}
void updateDevice(){ gpu_clone_.updateDevice();}
void updateHost(){ gpu_clone_.updateHost();}
int val_;
private:
array::gridtools::GPUClonable<int_gpu> gpu_clone_;
};
__global__
void kernel_ex(VectorView<int_gpu*>* list_ints )
{
for(size_t i=0; i < list_ints->size() ; ++i) {
(*list_ints)[i]->val_ += 5;
}
}
CASE( "test_resize" )
{
Vector<int_gpu*> list_ints(2);
VectorView<int_gpu*> list_ints_h = make_host_vector_view(list_ints);
list_ints_h[0] = new int_gpu(3);
list_ints_h[1] = new int_gpu(4);
EXPECT( list_ints_h[0]->val_ == 3 );
EXPECT( list_ints_h[1]->val_ == 4 );
list_ints.resize(6);
EXPECT( list_ints_h.is_valid(list_ints) == false );
VectorView<int_gpu*> list_ints_h2 = make_host_vector_view(list_ints);
EXPECT( list_ints_h2[0]->val_ == 3 );
EXPECT( list_ints_h2[1]->val_ == 4 );
EXPECT( list_ints_h2.size() == 6 );
}
CASE( "test_vector_kernel" )
{
Vector<int_gpu*> list_ints(4);
VectorView<int_gpu*> list_ints_h = make_host_vector_view(list_ints);
list_ints_h[0] = new int_gpu(3);
list_ints_h[1] = new int_gpu(4);
list_ints_h[2] = new int_gpu(5);
list_ints_h[3] = new int_gpu(6);
list_ints.updateDevice();
VectorView<int_gpu*> list_ints_d = make_device_vector_view(list_ints);
VectorView<int_gpu*>* list_ints_dp;
hipMalloc((void**)(&list_ints_dp), sizeof(VectorView<int_gpu*>));
hipMemcpy(list_ints_dp, &list_ints_d, sizeof(VectorView<int_gpu*>), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(kernel_ex, dim3(1), dim3(1), 0, 0, list_ints_dp);
if( hipPeekAtLastError() != hipSuccess) std::cout << "ERROR " << std::endl;
list_ints.updateHost();
EXPECT( list_ints_h[0]->val_ == 8 );
EXPECT_THROWS_AS( list_ints.resize(6), eckit::AssertionFailed );
}
}
}
int main(int argc, char **argv) {
return atlas::test::run( argc, argv );
}
|
72c7a880f7f7c2be2b94700a0e6504580b60fccb.cu
|
/*
* (C) Copyright 1996-2016 ECMWF.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
* In applying this licence, ECMWF does not waive the privileges and immunities
* granted to it by virtue of its status as an intergovernmental organisation nor
* does it submit to any jurisdiction.
*/
#include <cuda_runtime.h>
#include "tests/AtlasTestEnvironment.h"
#include "atlas/array/Vector.h"
#include "atlas/array/gridtools/GPUClonable.h"
#include "atlas/array.h"
#include "atlas/array/MakeView.h"
#include "atlas/runtime/Log.h"
using namespace atlas::array;
namespace atlas {
namespace test {
struct int_gpu {
int_gpu() = delete;
int_gpu(int val) : val_(val), gpu_clone_(this) {}
int_gpu* gpu_object_ptr() {return gpu_clone_.gpu_object_ptr();}
void updateDevice(){ gpu_clone_.updateDevice();}
void updateHost(){ gpu_clone_.updateHost();}
int val_;
private:
array::gridtools::GPUClonable<int_gpu> gpu_clone_;
};
__global__
void kernel_ex(VectorView<int_gpu*>* list_ints )
{
for(size_t i=0; i < list_ints->size() ; ++i) {
(*list_ints)[i]->val_ += 5;
}
}
CASE( "test_resize" )
{
Vector<int_gpu*> list_ints(2);
VectorView<int_gpu*> list_ints_h = make_host_vector_view(list_ints);
list_ints_h[0] = new int_gpu(3);
list_ints_h[1] = new int_gpu(4);
EXPECT( list_ints_h[0]->val_ == 3 );
EXPECT( list_ints_h[1]->val_ == 4 );
list_ints.resize(6);
EXPECT( list_ints_h.is_valid(list_ints) == false );
VectorView<int_gpu*> list_ints_h2 = make_host_vector_view(list_ints);
EXPECT( list_ints_h2[0]->val_ == 3 );
EXPECT( list_ints_h2[1]->val_ == 4 );
EXPECT( list_ints_h2.size() == 6 );
}
CASE( "test_vector_kernel" )
{
Vector<int_gpu*> list_ints(4);
VectorView<int_gpu*> list_ints_h = make_host_vector_view(list_ints);
list_ints_h[0] = new int_gpu(3);
list_ints_h[1] = new int_gpu(4);
list_ints_h[2] = new int_gpu(5);
list_ints_h[3] = new int_gpu(6);
list_ints.updateDevice();
VectorView<int_gpu*> list_ints_d = make_device_vector_view(list_ints);
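  // the kernel dereferences a VectorView pointer, so the device-side view object
  // itself is copied into device memory before the launch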
VectorView<int_gpu*>* list_ints_dp;
cudaMalloc((void**)(&list_ints_dp), sizeof(VectorView<int_gpu*>));
cudaMemcpy(list_ints_dp, &list_ints_d, sizeof(VectorView<int_gpu*>), cudaMemcpyHostToDevice);
kernel_ex<<<1,1>>>(list_ints_dp);
if( cudaPeekAtLastError() != cudaSuccess) std::cout << "ERROR " << std::endl;
list_ints.updateHost();
EXPECT( list_ints_h[0]->val_ == 8 );
EXPECT_THROWS_AS( list_ints.resize(6), eckit::AssertionFailed );
}
}
}
int main(int argc, char **argv) {
return atlas::test::run( argc, argv );
}
|
052e5869d9b60a9be1dad8b557ef7d4c7818d7b5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/fil/fil.h>
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <treelite/c_api.h>
#include <treelite/frontend.h>
#include <treelite/tree.h>
#include <cmath>
#include <cstdio>
#include <limits>
#include <memory>
#include <numeric>
#include <ostream>
#include <raft/cuda_utils.cuh>
#include <raft/random/rng.cuh>
#include <utility>
#include "../../src/fil/internal.cuh"
#define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error")
namespace ML {
namespace tl = treelite;
namespace tlf = treelite::frontend;
using namespace fil;
struct FilTestParams {
// input data parameters
int num_rows = 20'000;
int num_cols = 50;
float nan_prob = 0.05;
// forest parameters
int depth = 8;
int num_trees = 50;
float leaf_prob = 0.05;
// output parameters
output_t output = output_t::RAW;
float threshold = 0.0f;
float global_bias = 0.0f;
// runtime parameters
int blocks_per_sm = 0;
int threads_per_tree = 1;
int n_items = 0;
algo_t algo = algo_t::NAIVE;
int seed = 42;
float tolerance = 2e-3f;
bool print_forest_shape = false;
// treelite parameters, only used for treelite tests
tl::Operator op = tl::Operator::kLT;
leaf_algo_t leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY;
// when FLOAT_UNARY_BINARY == leaf_algo:
// num_classes = 1 means it's regression
// num_classes = 2 means it's binary classification
// (complement probabilities, then use threshold)
// when GROVE_PER_CLASS == leaf_algo:
// it's multiclass classification (num_classes must be > 2),
// done by splitting the forest in num_classes groups,
// each of which computes one-vs-all probability for its class.
// when CATEGORICAL_LEAF == leaf_algo:
// num_classes must be > 1 and it's multiclass classification.
// done by storing the class label in each leaf and voting.
// it's used in treelite ModelBuilder initialization
int num_classes = 1;
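  // e.g. with GROVE_PER_CLASS, num_trees = 52 and num_classes = 4, tree j votes for
  // class j % 4, so trees 0, 4, 8, ... form the grove for class 0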
  size_t num_proba_outputs() { return num_rows * std::max(num_classes, 2); }
size_t num_preds_outputs() { return num_rows; }
};
std::string output2str(fil::output_t output)
{
if (output == fil::RAW) return "RAW";
std::string s = "";
if (output & fil::AVG) s += "| AVG";
if (output & fil::CLASS) s += "| CLASS";
if (output & fil::SIGMOID) s += "| SIGMOID";
if (output & fil::SOFTMAX) s += "| SOFTMAX";
return s;
}
std::ostream& operator<<(std::ostream& os, const FilTestParams& ps)
{
os << "num_rows = " << ps.num_rows << ", num_cols = " << ps.num_cols
<< ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth
<< ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob
<< ", output = " << output2str(ps.output) << ", threshold = " << ps.threshold
<< ", threads_per_tree = " << ps.threads_per_tree << ", n_items = " << ps.n_items
<< ", blocks_per_sm = " << ps.blocks_per_sm << ", algo = " << ps.algo << ", seed = " << ps.seed
<< ", tolerance = " << ps.tolerance << ", op = " << tl::OpName(ps.op)
<< ", global_bias = " << ps.global_bias << ", leaf_algo = " << ps.leaf_algo
<< ", num_classes = " << ps.num_classes;
return os;
}
__global__ void nan_kernel(float* data, const bool* mask, int len, float nan)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= len) return;
if (!mask[tid]) data[tid] = nan;
}
float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); }
class BaseFilTest : public testing::TestWithParam<FilTestParams> {
protected:
void setup_helper()
{
// setup
ps = testing::TestWithParam<FilTestParams>::GetParam();
CUDA_CHECK(hipStreamCreate(&stream));
handle.set_stream(stream);
generate_forest();
generate_data();
predict_on_cpu();
predict_on_gpu();
}
void SetUp() override { setup_helper(); }
void TearDown() override
{
CUDA_CHECK(hipFree(preds_d));
CUDA_CHECK(hipFree(want_preds_d));
CUDA_CHECK(hipFree(data_d));
CUDA_CHECK(hipFree(want_proba_d));
CUDA_CHECK(hipFree(proba_d));
}
void generate_forest()
{
size_t num_nodes = forest_num_nodes();
// helper data
/// weights, used as float* or int*
int* weights_d = nullptr;
float* thresholds_d = nullptr;
int* fids_d = nullptr;
bool* def_lefts_d = nullptr;
bool* is_leafs_d = nullptr;
bool* def_lefts_h = nullptr;
bool* is_leafs_h = nullptr;
// allocate GPU data
raft::allocate(weights_d, num_nodes);
// sizeof(float) == sizeof(int)
raft::allocate(thresholds_d, num_nodes);
raft::allocate(fids_d, num_nodes);
raft::allocate(def_lefts_d, num_nodes);
raft::allocate(is_leafs_d, num_nodes);
// generate on-GPU random data
raft::random::Rng r(ps.seed);
if (ps.leaf_algo == fil::leaf_algo_t::CATEGORICAL_LEAF) {
// [0..num_classes)
r.uniformInt((int*)weights_d, num_nodes, 0, ps.num_classes, stream);
} else if (ps.leaf_algo == fil::leaf_algo_t::VECTOR_LEAF) {
std::mt19937 gen(3);
std::uniform_real_distribution<> dist(0, 1);
vector_leaf.resize(num_nodes * ps.num_classes);
for (size_t i = 0; i < vector_leaf.size(); i++) {
vector_leaf[i] = dist(gen);
}
// Normalise probabilities to 1
for (size_t i = 0; i < vector_leaf.size(); i += ps.num_classes) {
auto sum = std::accumulate(&vector_leaf[i], &vector_leaf[i + ps.num_classes], 0.0f);
for (size_t j = i; j < i + ps.num_classes; j++) {
vector_leaf[j] /= sum;
}
}
} else {
r.uniform((float*)weights_d, num_nodes, -1.0f, 1.0f, stream);
}
r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream);
r.uniformInt(fids_d, num_nodes, 0, ps.num_cols, stream);
r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream);
r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream);
// copy data to host
std::vector<float> thresholds_h(num_nodes);
std::vector<int> weights_h(num_nodes), fids_h(num_nodes);
def_lefts_h = new bool[num_nodes];
is_leafs_h = new bool[num_nodes];
raft::update_host(weights_h.data(), (int*)weights_d, num_nodes, stream);
raft::update_host(thresholds_h.data(), thresholds_d, num_nodes, stream);
raft::update_host(fids_h.data(), fids_d, num_nodes, stream);
raft::update_host(def_lefts_h, def_lefts_d, num_nodes, stream);
raft::update_host(is_leafs_h, is_leafs_d, num_nodes, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
// mark leaves
for (size_t i = 0; i < ps.num_trees; ++i) {
int num_tree_nodes = tree_num_nodes();
size_t leaf_start = num_tree_nodes * i + num_tree_nodes / 2;
size_t leaf_end = num_tree_nodes * (i + 1);
for (size_t j = leaf_start; j < leaf_end; ++j) {
is_leafs_h[j] = true;
}
}
// initialize nodes
nodes.resize(num_nodes);
for (size_t i = 0; i < num_nodes; ++i) {
fil::val_t w;
switch (ps.leaf_algo) {
case fil::leaf_algo_t::CATEGORICAL_LEAF: w.idx = weights_h[i]; break;
case fil::leaf_algo_t::FLOAT_UNARY_BINARY:
case fil::leaf_algo_t::GROVE_PER_CLASS:
// not relying on fil::val_t internals
// merely that we copied floats into weights_h earlier
std::memcpy(&w.f, &weights_h[i], sizeof w.f);
break;
case fil::leaf_algo_t::VECTOR_LEAF: w.idx = i; break;
default: ASSERT(false, "internal error: invalid ps.leaf_algo");
}
nodes[i] = fil::dense_node(w, thresholds_h[i], fids_h[i], def_lefts_h[i], is_leafs_h[i]);
}
// clean up
delete[] def_lefts_h;
delete[] is_leafs_h;
CUDA_CHECK(hipFree(is_leafs_d));
CUDA_CHECK(hipFree(def_lefts_d));
CUDA_CHECK(hipFree(fids_d));
CUDA_CHECK(hipFree(thresholds_d));
CUDA_CHECK(hipFree(weights_d));
}
void generate_data()
{
// allocate arrays
size_t num_data = ps.num_rows * ps.num_cols;
raft::allocate(data_d, num_data);
bool* mask_d = nullptr;
raft::allocate(mask_d, num_data);
// generate random data
raft::random::Rng r(ps.seed);
r.uniform(data_d, num_data, -1.0f, 1.0f, stream);
r.bernoulli(mask_d, num_data, ps.nan_prob, stream);
int tpb = 256;
    hipLaunchKernelGGL(nan_kernel, dim3(raft::ceildiv(int(num_data), tpb)), dim3(tpb), 0, stream,
data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN());
CUDA_CHECK(hipPeekAtLastError());
// copy to host
data_h.resize(num_data);
raft::update_host(data_h.data(), data_d, num_data, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
// clean up
CUDA_CHECK(hipFree(mask_d));
}
void apply_softmax(float* class_scores)
{
float max = *std::max_element(class_scores, &class_scores[ps.num_classes]);
for (int i = 0; i < ps.num_classes; ++i)
class_scores[i] = expf(class_scores[i] - max);
float sum = std::accumulate(class_scores, &class_scores[ps.num_classes], 0.0f);
for (int i = 0; i < ps.num_classes; ++i)
class_scores[i] /= sum;
}
void transform(float f, float& proba, float& output)
{
if ((ps.output & fil::output_t::AVG) != 0) {
if (ps.leaf_algo == fil::leaf_algo_t::GROVE_PER_CLASS) {
f /= ps.num_trees / ps.num_classes;
} else {
f *= 1.0f / ps.num_trees;
}
}
f += ps.global_bias;
if ((ps.output & fil::output_t::SIGMOID) != 0) { f = sigmoid(f); }
proba = f;
if ((ps.output & fil::output_t::CLASS) != 0) { f = f > ps.threshold ? 1.0f : 0.0f; }
output = f;
}
void complement(float* proba) { proba[0] = 1.0f - proba[1]; }
void predict_on_cpu()
{
// predict on host
std::vector<float> want_preds_h(ps.num_preds_outputs());
std::vector<float> want_proba_h(ps.num_proba_outputs());
int num_nodes = tree_num_nodes();
std::vector<float> class_scores(ps.num_classes);
switch (ps.leaf_algo) {
case fil::leaf_algo_t::FLOAT_UNARY_BINARY:
for (int i = 0; i < ps.num_rows; ++i) {
float pred = 0.0f;
for (int j = 0; j < ps.num_trees; ++j) {
pred += infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.num_cols]).f;
}
transform(pred, want_proba_h[i * 2 + 1], want_preds_h[i]);
complement(&(want_proba_h[i * 2]));
}
break;
case fil::leaf_algo_t::GROVE_PER_CLASS:
for (int row = 0; row < ps.num_rows; ++row) {
std::fill(class_scores.begin(), class_scores.end(), 0.0f);
for (int tree = 0; tree < ps.num_trees; ++tree) {
class_scores[tree % ps.num_classes] +=
infer_one_tree(&nodes[tree * num_nodes], &data_h[row * ps.num_cols]).f;
}
want_preds_h[row] =
std::max_element(class_scores.begin(), class_scores.end()) - class_scores.begin();
for (int c = 0; c < ps.num_classes; ++c) {
float thresholded_proba; // not used;
transform(class_scores[c], want_proba_h[row * ps.num_classes + c], thresholded_proba);
}
if ((ps.output & fil::output_t::SOFTMAX) != 0)
apply_softmax(&want_proba_h[row * ps.num_classes]);
}
break;
case fil::leaf_algo_t::CATEGORICAL_LEAF: {
std::vector<int> class_votes(ps.num_classes);
for (int r = 0; r < ps.num_rows; ++r) {
std::fill(class_votes.begin(), class_votes.end(), 0);
for (int j = 0; j < ps.num_trees; ++j) {
int class_label = infer_one_tree(&nodes[j * num_nodes], &data_h[r * ps.num_cols]).idx;
++class_votes[class_label];
}
for (int c = 0; c < ps.num_classes; ++c) {
float thresholded_proba; // not used; do argmax instead
transform(class_votes[c], want_proba_h[r * ps.num_classes + c], thresholded_proba);
}
want_preds_h[r] =
std::max_element(class_votes.begin(), class_votes.end()) - class_votes.begin();
}
break;
}
case fil::leaf_algo_t::VECTOR_LEAF:
for (int r = 0; r < ps.num_rows; ++r) {
std::vector<float> class_probabilities(ps.num_classes);
for (int j = 0; j < ps.num_trees; ++j) {
int vector_index = infer_one_tree(&nodes[j * num_nodes], &data_h[r * ps.num_cols]).idx;
float sum = 0.0;
for (int k = 0; k < ps.num_classes; k++) {
class_probabilities[k] += vector_leaf[vector_index * ps.num_classes + k];
sum += vector_leaf[vector_index * ps.num_classes + k];
}
ASSERT_LE(std::abs(sum - 1.0f), 1e-5);
}
for (int c = 0; c < ps.num_classes; ++c) {
want_proba_h[r * ps.num_classes + c] = class_probabilities[c];
}
want_preds_h[r] =
std::max_element(class_probabilities.begin(), class_probabilities.end()) -
class_probabilities.begin();
}
break;
}
// copy to GPU
raft::allocate(want_preds_d, ps.num_preds_outputs());
raft::allocate(want_proba_d, ps.num_proba_outputs());
raft::update_device(want_preds_d, want_preds_h.data(), ps.num_preds_outputs(), stream);
raft::update_device(want_proba_d, want_proba_h.data(), ps.num_proba_outputs(), stream);
CUDA_CHECK(hipStreamSynchronize(stream));
}
virtual void init_forest(fil::forest_t* pforest) = 0;
void predict_on_gpu()
{
fil::forest_t forest = nullptr;
init_forest(&forest);
// predict
raft::allocate(preds_d, ps.num_preds_outputs());
raft::allocate(proba_d, ps.num_proba_outputs());
fil::predict(handle, forest, preds_d, data_d, ps.num_rows);
fil::predict(handle, forest, proba_d, data_d, ps.num_rows, true);
CUDA_CHECK(hipStreamSynchronize(stream));
// cleanup
fil::free(handle, forest);
}
void compare()
{
ASSERT_TRUE(raft::devArrMatch(want_proba_d,
proba_d,
ps.num_proba_outputs(),
raft::CompareApprox<float>(ps.tolerance),
stream));
float tolerance = ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY
? ps.tolerance
: std::numeric_limits<float>::epsilon();
// in multi-class prediction, floats represent the most likely class
// and would be generated by converting an int to float
ASSERT_TRUE(raft::devArrMatch(
want_preds_d, preds_d, ps.num_rows, raft::CompareApprox<float>(tolerance), stream));
}
fil::val_t infer_one_tree(fil::dense_node* root, float* data)
{
int curr = 0;
fil::val_t output{.f = 0.0f};
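    // dense trees are stored in breadth-first order: the children of node curr are at
    // 2*curr + 1 and 2*curr + 2; NaN feature values follow the node's default direction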
for (;;) {
const fil::dense_node& node = root[curr];
if (node.is_leaf()) return node.base_node::output<val_t>();
float val = data[node.fid()];
bool cond = isnan(val) ? !node.def_left() : val >= node.thresh();
curr = (curr << 1) + 1 + (cond ? 1 : 0);
}
return output;
}
int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; }
int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; }
// predictions
float* preds_d = nullptr;
float* proba_d = nullptr;
float* want_preds_d = nullptr;
float* want_proba_d = nullptr;
// input data
float* data_d = nullptr;
std::vector<float> data_h;
// forest data
std::vector<fil::dense_node> nodes;
std::vector<float> vector_leaf;
// parameters
hipStream_t stream;
raft::handle_t handle;
FilTestParams ps;
};
class PredictDenseFilTest : public BaseFilTest {
protected:
void init_forest(fil::forest_t* pforest) override
{
// init FIL model
fil::forest_params_t fil_ps;
fil_ps.depth = ps.depth;
fil_ps.num_trees = ps.num_trees;
fil_ps.num_cols = ps.num_cols;
fil_ps.algo = ps.algo;
fil_ps.output = ps.output;
fil_ps.threshold = ps.threshold;
fil_ps.global_bias = ps.global_bias;
fil_ps.leaf_algo = ps.leaf_algo;
fil_ps.num_classes = ps.num_classes;
fil_ps.blocks_per_sm = ps.blocks_per_sm;
fil_ps.threads_per_tree = ps.threads_per_tree;
fil_ps.n_items = ps.n_items;
fil::init_dense(handle, pforest, nodes.data(), &fil_ps, vector_leaf);
}
};
template <typename fil_node_t>
class BasePredictSparseFilTest : public BaseFilTest {
protected:
void dense2sparse_node(const fil::dense_node* dense_root,
int i_dense,
int i_sparse_root,
int i_sparse)
{
const fil::dense_node& node = dense_root[i_dense];
if (node.is_leaf()) {
// leaf sparse node
sparse_nodes[i_sparse] = fil_node_t(node.base_node::output<val_t>(),
node.thresh(),
node.fid(),
node.def_left(),
node.is_leaf(),
0);
return;
}
// inner sparse node
// reserve space for children
int left_index = sparse_nodes.size();
sparse_nodes.push_back(fil_node_t());
sparse_nodes.push_back(fil_node_t());
sparse_nodes[i_sparse] = fil_node_t(node.base_node::output<val_t>(),
node.thresh(),
node.fid(),
node.def_left(),
node.is_leaf(),
left_index - i_sparse_root);
dense2sparse_node(dense_root, 2 * i_dense + 1, i_sparse_root, left_index);
dense2sparse_node(dense_root, 2 * i_dense + 2, i_sparse_root, left_index + 1);
}
void dense2sparse_tree(const fil::dense_node* dense_root)
{
int i_sparse_root = sparse_nodes.size();
sparse_nodes.push_back(fil_node_t());
dense2sparse_node(dense_root, 0, i_sparse_root, i_sparse_root);
trees.push_back(i_sparse_root);
}
void dense2sparse()
{
for (int tree = 0; tree < ps.num_trees; ++tree) {
dense2sparse_tree(&nodes[tree * tree_num_nodes()]);
}
}
void init_forest(fil::forest_t* pforest) override
{
// init FIL model
fil::forest_params_t fil_params;
fil_params.num_trees = ps.num_trees;
fil_params.num_cols = ps.num_cols;
fil_params.algo = ps.algo;
fil_params.output = ps.output;
fil_params.threshold = ps.threshold;
fil_params.global_bias = ps.global_bias;
fil_params.leaf_algo = ps.leaf_algo;
fil_params.num_classes = ps.num_classes;
fil_params.blocks_per_sm = ps.blocks_per_sm;
fil_params.threads_per_tree = ps.threads_per_tree;
fil_params.n_items = ps.n_items;
dense2sparse();
fil_params.num_nodes = sparse_nodes.size();
fil::init_sparse(handle, pforest, trees.data(), sparse_nodes.data(), &fil_params, vector_leaf);
}
std::vector<fil_node_t> sparse_nodes;
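  // per-tree root offsets into sparse_nodes (filled by dense2sparse_tree)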
std::vector<int> trees;
};
typedef BasePredictSparseFilTest<fil::sparse_node16> PredictSparse16FilTest;
typedef BasePredictSparseFilTest<fil::sparse_node8> PredictSparse8FilTest;
class TreeliteFilTest : public BaseFilTest {
protected:
  /** adds nodes[node] of the tree rooted at index root to the builder
      at index *pkey, increments *pkey,
      and returns the treelite key of the node */
int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root, int node)
{
int key = (*pkey)++;
builder->CreateNode(key);
const fil::dense_node& dense_node = nodes[node];
if (dense_node.is_leaf()) {
switch (ps.leaf_algo) {
case fil::leaf_algo_t::FLOAT_UNARY_BINARY:
case fil::leaf_algo_t::GROVE_PER_CLASS:
// default is fil::FLOAT_UNARY_BINARY
builder->SetLeafNode(key, tlf::Value::Create(dense_node.base_node::output<val_t>().f));
break;
case fil::leaf_algo_t::CATEGORICAL_LEAF: {
std::vector<tlf::Value> vec(ps.num_classes);
for (int i = 0; i < ps.num_classes; ++i) {
vec[i] = tlf::Value::Create(i == dense_node.template output<val_t>().idx ? 1.0f : 0.0f);
}
builder->SetLeafVectorNode(key, vec);
break;
}
case fil::leaf_algo_t::VECTOR_LEAF: {
std::vector<tlf::Value> vec(ps.num_classes);
for (int i = 0; i < ps.num_classes; ++i) {
auto idx = dense_node.template output<val_t>().idx;
vec[i] = tlf::Value::Create(vector_leaf[idx * ps.num_classes + i]);
}
builder->SetLeafVectorNode(key, vec);
break;
}
}
} else {
int left = root + 2 * (node - root) + 1;
int right = root + 2 * (node - root) + 2;
float threshold = dense_node.thresh();
bool default_left = dense_node.def_left();
switch (ps.op) {
case tl::Operator::kLT: break;
case tl::Operator::kLE:
// adjust the threshold
threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity());
break;
case tl::Operator::kGT:
// adjust the threshold; left and right still need to be swapped
threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity());
case tl::Operator::kGE:
// swap left and right
std::swap(left, right);
default_left = !default_left;
break;
default: ASSERT(false, "comparison operator must be <, >, <= or >=");
}
int left_key = node_to_treelite(builder, pkey, root, left);
int right_key = node_to_treelite(builder, pkey, root, right);
builder->SetNumericalTestNode(key,
dense_node.fid(),
ps.op,
tlf::Value::Create(threshold),
default_left,
left_key,
right_key);
}
return key;
}
void init_forest_impl(fil::forest_t* pforest, fil::storage_type_t storage_type)
{
bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0;
int treelite_num_classes =
ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY ? 1 : ps.num_classes;
std::unique_ptr<tlf::ModelBuilder> model_builder(new tlf::ModelBuilder(ps.num_cols,
treelite_num_classes,
random_forest_flag,
tl::TypeInfo::kFloat32,
tl::TypeInfo::kFloat32));
// prediction transform
if ((ps.output & fil::output_t::SIGMOID) != 0) {
if (ps.num_classes > 2)
model_builder->SetModelParam("pred_transform", "multiclass_ova");
else
model_builder->SetModelParam("pred_transform", "sigmoid");
} else if (ps.leaf_algo != fil::leaf_algo_t::FLOAT_UNARY_BINARY) {
model_builder->SetModelParam("pred_transform", "max_index");
ps.output = fil::output_t(ps.output | fil::output_t::CLASS);
} else if (ps.leaf_algo == GROVE_PER_CLASS) {
model_builder->SetModelParam("pred_transform", "identity_multiclass");
} else {
model_builder->SetModelParam("pred_transform", "identity");
}
// global bias
char* global_bias_str = nullptr;
ASSERT(asprintf(&global_bias_str, "%f", double(ps.global_bias)) > 0,
"cannot convert global_bias into a string");
model_builder->SetModelParam("global_bias", global_bias_str);
::free(global_bias_str);
// build the trees
for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) {
tlf::TreeBuilder* tree_builder =
new tlf::TreeBuilder(tl::TypeInfo::kFloat32, tl::TypeInfo::kFloat32);
int key_counter = 0;
int root = i_tree * tree_num_nodes();
int root_key = node_to_treelite(tree_builder, &key_counter, root, root);
tree_builder->SetRootNode(root_key);
// InsertTree() consumes tree_builder
TL_CPP_CHECK(model_builder->InsertTree(tree_builder));
}
// commit the model
std::unique_ptr<tl::Model> model = model_builder->CommitModel();
// init FIL forest with the model
char* forest_shape_str = nullptr;
fil::treelite_params_t params;
params.algo = ps.algo;
params.threshold = ps.threshold;
params.output_class = (ps.output & fil::output_t::CLASS) != 0;
params.storage_type = storage_type;
params.blocks_per_sm = ps.blocks_per_sm;
params.threads_per_tree = ps.threads_per_tree;
params.n_items = ps.n_items;
params.pforest_shape_str = ps.print_forest_shape ? &forest_shape_str : nullptr;
fil::from_treelite(handle, pforest, (ModelHandle)model.get(), ¶ms);
CUDA_CHECK(hipStreamSynchronize(stream));
if (ps.print_forest_shape) {
std::string str(forest_shape_str);
for (const char* substr : {"model size",
" MB",
"Depth histogram:",
"Avg nodes per tree",
"Leaf depth",
"Depth histogram fingerprint"}) {
ASSERT(str.find(substr) != std::string::npos,
"\"%s\" not found in forest shape :\n%s",
substr,
str.c_str());
}
}
::free(forest_shape_str);
}
};
class TreeliteDenseFilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override
{
init_forest_impl(pforest, fil::storage_type_t::DENSE);
}
};
class TreeliteSparse16FilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override
{
init_forest_impl(pforest, fil::storage_type_t::SPARSE);
}
};
class TreeliteSparse8FilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override
{
init_forest_impl(pforest, fil::storage_type_t::SPARSE8);
}
};
class TreeliteAutoFilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override
{
init_forest_impl(pforest, fil::storage_type_t::AUTO);
}
};
// test for failures; currently only supported for sparse8 nodes
class TreeliteThrowSparse8FilTest : public TreeliteSparse8FilTest {
protected:
// model import happens in check(), so this function is empty
void SetUp() override {}
void check() { ASSERT_THROW(setup_helper(), raft::exception); }
};
/** mechanism to use named aggregate initialization before C++20, and also use
the struct defaults. Using it directly only works if all defaulted
members come after ones explicitly mentioned.
**/
#define FIL_TEST_PARAMS(...) \
[]() { \
struct NonDefaultFilTestParams : public FilTestParams { \
NonDefaultFilTestParams() { __VA_ARGS__; } \
}; \
return FilTestParams(NonDefaultFilTestParams()); \
}()
// kEQ is intentionally unused, and kLT is default
static const tl::Operator kLE = tl::Operator::kLE;
static const tl::Operator kGT = tl::Operator::kGT;
static const tl::Operator kGE = tl::Operator::kGE;
std::vector<FilTestParams> predict_dense_inputs = {
FIL_TEST_PARAMS(),
FIL_TEST_PARAMS(algo = TREE_REORG),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID),
FIL_TEST_PARAMS(output = SIGMOID, algo = TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(output = AVG),
FIL_TEST_PARAMS(output = AVG, algo = TREE_REORG),
FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
FIL_TEST_PARAMS(output = AVG_CLASS, algo = TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(global_bias = 0.5, algo = TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = AVG, global_bias = 0.5),
FIL_TEST_PARAMS(
output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, algo = TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO),
FIL_TEST_PARAMS(
output = AVG_CLASS, algo = BATCH_TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
FIL_TEST_PARAMS(algo = TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(output = SIGMOID, leaf_algo = CATEGORICAL_LEAF, num_classes = 7),
FIL_TEST_PARAMS(
global_bias = 0.5, algo = TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 4),
FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, leaf_algo = CATEGORICAL_LEAF, num_classes = 4),
FIL_TEST_PARAMS(
output = AVG_CLASS, algo = BATCH_TREE_REORG, leaf_algo = GROVE_PER_CLASS, num_classes = 5),
FIL_TEST_PARAMS(algo = TREE_REORG, leaf_algo = GROVE_PER_CLASS, num_classes = 5),
FIL_TEST_PARAMS(num_trees = 49, output = SIGMOID, leaf_algo = GROVE_PER_CLASS, num_classes = 7),
FIL_TEST_PARAMS(num_trees = 52,
global_bias = 0.5,
algo = TREE_REORG,
leaf_algo = GROVE_PER_CLASS,
num_classes = 4),
FIL_TEST_PARAMS(
num_trees = 52, output = AVG, global_bias = 0.5, leaf_algo = GROVE_PER_CLASS, num_classes = 4),
FIL_TEST_PARAMS(blocks_per_sm = 1),
FIL_TEST_PARAMS(blocks_per_sm = 4),
FIL_TEST_PARAMS(num_classes = 3, blocks_per_sm = 1, leaf_algo = CATEGORICAL_LEAF),
FIL_TEST_PARAMS(num_classes = 3, blocks_per_sm = 4, leaf_algo = CATEGORICAL_LEAF),
FIL_TEST_PARAMS(num_classes = 5, blocks_per_sm = 1, leaf_algo = GROVE_PER_CLASS),
FIL_TEST_PARAMS(num_classes = 5, blocks_per_sm = 4, leaf_algo = GROVE_PER_CLASS),
FIL_TEST_PARAMS(
leaf_algo = GROVE_PER_CLASS, blocks_per_sm = 1, num_trees = 512, num_classes = 512),
FIL_TEST_PARAMS(
leaf_algo = GROVE_PER_CLASS, blocks_per_sm = 4, num_trees = 512, num_classes = 512),
FIL_TEST_PARAMS(num_trees = 52, output = SOFTMAX, leaf_algo = GROVE_PER_CLASS, num_classes = 4),
FIL_TEST_PARAMS(
num_trees = 52, output = AVG_SOFTMAX, leaf_algo = GROVE_PER_CLASS, num_classes = 4),
FIL_TEST_PARAMS(num_trees = 3 * (FIL_TPB + 1),
output = SOFTMAX,
leaf_algo = GROVE_PER_CLASS,
num_classes = FIL_TPB + 1),
FIL_TEST_PARAMS(num_trees = 3 * (FIL_TPB + 1),
output = AVG_SOFTMAX,
leaf_algo = GROVE_PER_CLASS,
num_classes = FIL_TPB + 1),
FIL_TEST_PARAMS(num_cols = 100'000, depth = 5, num_trees = 1, leaf_algo = FLOAT_UNARY_BINARY),
FIL_TEST_PARAMS(num_rows = 101,
num_cols = 100'000,
depth = 5,
num_trees = 9,
algo = BATCH_TREE_REORG,
leaf_algo = GROVE_PER_CLASS,
num_classes = 3),
FIL_TEST_PARAMS(num_rows = 102,
num_cols = 100'000,
depth = 5,
num_trees = 3 * (FIL_TPB + 1),
algo = BATCH_TREE_REORG,
leaf_algo = GROVE_PER_CLASS,
num_classes = FIL_TPB + 1),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 100'000,
depth = 5,
num_trees = 1,
algo = BATCH_TREE_REORG,
leaf_algo = CATEGORICAL_LEAF,
num_classes = 3),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, threads_per_tree = 2),
FIL_TEST_PARAMS(algo = NAIVE, threads_per_tree = 4),
FIL_TEST_PARAMS(algo = TREE_REORG, threads_per_tree = 8),
FIL_TEST_PARAMS(algo = ALGO_AUTO, threads_per_tree = 16),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, threads_per_tree = 32),
FIL_TEST_PARAMS(algo = NAIVE, threads_per_tree = 64),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, threads_per_tree = 128, n_items = 3),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, threads_per_tree = 256),
FIL_TEST_PARAMS(algo = TREE_REORG, threads_per_tree = 32, n_items = 1),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, threads_per_tree = 16, n_items = 4),
FIL_TEST_PARAMS(algo = NAIVE, threads_per_tree = 32, n_items = 4),
FIL_TEST_PARAMS(
num_rows = 500, num_cols = 2000, algo = BATCH_TREE_REORG, threads_per_tree = 64, n_items = 4),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_classes = 2),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_trees = 9, num_classes = 20),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 100'000,
depth = 5,
num_trees = 1,
algo = BATCH_TREE_REORG,
leaf_algo = VECTOR_LEAF,
num_classes = 3),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 5,
depth = 5,
num_trees = 3,
leaf_algo = VECTOR_LEAF,
num_classes = 4000),
};
TEST_P(PredictDenseFilTest, Predict) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, PredictDenseFilTest, testing::ValuesIn(predict_dense_inputs));
std::vector<FilTestParams> predict_sparse_inputs = {
FIL_TEST_PARAMS(),
FIL_TEST_PARAMS(output = SIGMOID),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, num_classes = 2),
FIL_TEST_PARAMS(output = AVG),
FIL_TEST_PARAMS(output = AVG_CLASS, global_bias = 0.5, num_classes = 2),
FIL_TEST_PARAMS(global_bias = 0.5),
FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5),
FIL_TEST_PARAMS(output = AVG, global_bias = 0.5),
FIL_TEST_PARAMS(output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = ALGO_AUTO, num_classes = 2),
FIL_TEST_PARAMS(output = AVG_CLASS,
threshold = 1.0,
global_bias = 0.5,
leaf_algo = CATEGORICAL_LEAF,
num_classes = 5000),
FIL_TEST_PARAMS(global_bias = 0.5, leaf_algo = CATEGORICAL_LEAF, num_classes = 6),
FIL_TEST_PARAMS(output = CLASS, leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
FIL_TEST_PARAMS(leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
FIL_TEST_PARAMS(depth = 2,
num_trees = 5000,
output = AVG_CLASS,
threshold = 1.0,
global_bias = 0.5,
leaf_algo = GROVE_PER_CLASS,
num_classes = 5000),
FIL_TEST_PARAMS(num_trees = 60, global_bias = 0.5, leaf_algo = GROVE_PER_CLASS, num_classes = 6),
FIL_TEST_PARAMS(num_trees = 51, output = CLASS, leaf_algo = GROVE_PER_CLASS, num_classes = 3),
FIL_TEST_PARAMS(num_trees = 51, leaf_algo = GROVE_PER_CLASS, num_classes = 3),
FIL_TEST_PARAMS(algo = NAIVE, threads_per_tree = 2),
FIL_TEST_PARAMS(algo = NAIVE, threads_per_tree = 8, n_items = 1),
FIL_TEST_PARAMS(algo = ALGO_AUTO, threads_per_tree = 16, n_items = 1),
FIL_TEST_PARAMS(algo = ALGO_AUTO, threads_per_tree = 32),
FIL_TEST_PARAMS(num_cols = 1, num_trees = 1, algo = NAIVE, threads_per_tree = 64, n_items = 1),
FIL_TEST_PARAMS(num_rows = 500, num_cols = 2000, algo = NAIVE, threads_per_tree = 64),
FIL_TEST_PARAMS(
num_rows = 500, num_cols = 2000, algo = ALGO_AUTO, threads_per_tree = 256, n_items = 1),
FIL_TEST_PARAMS(num_trees = 51, leaf_algo = VECTOR_LEAF, num_classes = 15),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_trees = 9, num_classes = 20),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 1000,
depth = 5,
num_trees = 1,
leaf_algo = VECTOR_LEAF,
num_classes = 3),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 5,
depth = 5,
num_trees = 3,
leaf_algo = VECTOR_LEAF,
num_classes = 4000),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 5,
depth = 5,
num_trees = 530,
leaf_algo = VECTOR_LEAF,
num_classes = 11),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 5,
depth = 5,
num_trees = 530,
leaf_algo = VECTOR_LEAF,
num_classes = 1111),
};
TEST_P(PredictSparse16FilTest, Predict) { compare(); }
// Temporarily disabled, see https://github.com/rapidsai/cuml/issues/3205
INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse16FilTest, testing::ValuesIn(predict_sparse_inputs));
TEST_P(PredictSparse8FilTest, Predict) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse8FilTest, testing::ValuesIn(predict_sparse_inputs));
std::vector<FilTestParams> import_dense_inputs = {
FIL_TEST_PARAMS(),
FIL_TEST_PARAMS(output = SIGMOID, op = kLE),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, op = kGT, num_classes = 2),
FIL_TEST_PARAMS(output = AVG, op = kGE),
FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
FIL_TEST_PARAMS(algo = TREE_REORG, op = kLE),
FIL_TEST_PARAMS(output = SIGMOID, algo = TREE_REORG, op = kGT),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = TREE_REORG, op = kGE, num_classes = 2),
FIL_TEST_PARAMS(output = AVG, algo = TREE_REORG),
FIL_TEST_PARAMS(output = AVG_CLASS, algo = TREE_REORG, op = kLE, num_classes = 2),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kLE),
FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kLE),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kGT),
FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kGT),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kGE),
FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kGE),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, op = kLE, num_classes = 2),
FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG, op = kLE),
FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, op = kGT, num_classes = 2),
FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, op = kGE, num_classes = 2),
FIL_TEST_PARAMS(global_bias = 0.5, algo = TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, algo = BATCH_TREE_REORG, op = kLE),
FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, op = kGT),
FIL_TEST_PARAMS(output = AVG_CLASS,
threshold = 1.0,
global_bias = 0.5,
algo = TREE_REORG,
op = kGE,
num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO, op = kLE),
FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO, op = kLE),
FIL_TEST_PARAMS(
output = AVG, algo = BATCH_TREE_REORG, op = kGE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(
output = AVG, algo = BATCH_TREE_REORG, op = kGT, leaf_algo = CATEGORICAL_LEAF, num_classes = 6),
FIL_TEST_PARAMS(
output = AVG, algo = BATCH_TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
FIL_TEST_PARAMS(
output = AVG, algo = BATCH_TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(
output = AVG_CLASS, algo = TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(
output = AVG, algo = TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 7),
FIL_TEST_PARAMS(output = AVG, leaf_algo = CATEGORICAL_LEAF, num_classes = 6),
FIL_TEST_PARAMS(output = CLASS,
algo = BATCH_TREE_REORG,
op = kGE,
leaf_algo = GROVE_PER_CLASS,
num_classes = 5),
FIL_TEST_PARAMS(num_trees = 48,
output = CLASS,
algo = BATCH_TREE_REORG,
op = kGT,
leaf_algo = GROVE_PER_CLASS,
num_classes = 6),
FIL_TEST_PARAMS(num_trees = 51,
output = CLASS,
algo = BATCH_TREE_REORG,
op = kLE,
leaf_algo = GROVE_PER_CLASS,
num_classes = 3),
FIL_TEST_PARAMS(output = CLASS,
algo = BATCH_TREE_REORG,
op = kLE,
leaf_algo = GROVE_PER_CLASS,
num_classes = 5),
FIL_TEST_PARAMS(
output = CLASS, algo = TREE_REORG, op = kLE, leaf_algo = GROVE_PER_CLASS, num_classes = 5),
FIL_TEST_PARAMS(num_trees = 49,
output = CLASS,
algo = TREE_REORG,
op = kLE,
leaf_algo = GROVE_PER_CLASS,
num_classes = 7),
FIL_TEST_PARAMS(num_trees = 48, output = CLASS, leaf_algo = GROVE_PER_CLASS, num_classes = 6),
FIL_TEST_PARAMS(print_forest_shape = true),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_classes = 2),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_trees = 19, num_classes = 20),
};
TEST_P(TreeliteDenseFilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteDenseFilTest, testing::ValuesIn(import_dense_inputs));
std::vector<FilTestParams> import_sparse_inputs = {
FIL_TEST_PARAMS(),
FIL_TEST_PARAMS(output = SIGMOID, op = kLE),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, op = kGT, num_classes = 2),
FIL_TEST_PARAMS(output = AVG, op = kGE),
FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
FIL_TEST_PARAMS(global_bias = 0.5),
FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, op = kLE),
FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, op = kGT),
FIL_TEST_PARAMS(
output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, op = kGE, num_classes = 2),
FIL_TEST_PARAMS(algo = ALGO_AUTO),
FIL_TEST_PARAMS(
output = AVG_CLASS, threshold = 1.0, op = kGE, leaf_algo = CATEGORICAL_LEAF, num_classes = 10),
FIL_TEST_PARAMS(output = AVG, algo = ALGO_AUTO, leaf_algo = CATEGORICAL_LEAF, num_classes = 4),
FIL_TEST_PARAMS(output = AVG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(output = AVG, leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
FIL_TEST_PARAMS(output = CLASS,
threshold = 1.0,
global_bias = 0.5,
op = kGE,
leaf_algo = GROVE_PER_CLASS,
num_classes = 10),
FIL_TEST_PARAMS(
num_trees = 52, output = CLASS, algo = ALGO_AUTO, leaf_algo = GROVE_PER_CLASS, num_classes = 4),
FIL_TEST_PARAMS(output = CLASS, op = kLE, leaf_algo = GROVE_PER_CLASS, num_classes = 5),
FIL_TEST_PARAMS(num_trees = 51,
output = CLASS,
global_bias = 0.5,
leaf_algo = GROVE_PER_CLASS,
num_classes = 3),
FIL_TEST_PARAMS(num_trees = 51,
output = SIGMOID_CLASS,
global_bias = 0.5,
leaf_algo = GROVE_PER_CLASS,
num_classes = 3),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_classes = 2),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_trees = 19, num_classes = 20),
};
TEST_P(TreeliteSparse16FilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse16FilTest, testing::ValuesIn(import_sparse_inputs));
TEST_P(TreeliteSparse8FilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse8FilTest, testing::ValuesIn(import_sparse_inputs));
std::vector<FilTestParams> import_auto_inputs = {
FIL_TEST_PARAMS(depth = 10, algo = ALGO_AUTO),
FIL_TEST_PARAMS(depth = 15, algo = ALGO_AUTO),
FIL_TEST_PARAMS(depth = 19, algo = ALGO_AUTO),
FIL_TEST_PARAMS(depth = 19, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(
depth = 10, output = AVG, algo = ALGO_AUTO, leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
FIL_TEST_PARAMS(depth = 10,
num_trees = 51,
output = CLASS,
algo = ALGO_AUTO,
leaf_algo = GROVE_PER_CLASS,
num_classes = 3),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_classes = 3, algo = ALGO_AUTO),
#if 0
FIL_TEST_PARAMS(depth = 19, output = AVG, algo = BATCH_TREE_REORG,
leaf_algo = CATEGORICAL_LEAF, num_classes = 6),
#endif
};
TEST_P(TreeliteAutoFilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteAutoFilTest, testing::ValuesIn(import_auto_inputs));
// adjust test parameters if the sparse8 format changes
std::vector<FilTestParams> import_throw_sparse8_inputs = {
// too many features
FIL_TEST_PARAMS(num_rows = 100, num_cols = 20000, depth = 10),
// too many tree nodes
FIL_TEST_PARAMS(depth = 16, num_trees = 5, leaf_prob = 0),
};
TEST_P(TreeliteThrowSparse8FilTest, Import) { check(); }
INSTANTIATE_TEST_CASE_P(FilTests,
TreeliteThrowSparse8FilTest,
testing::ValuesIn(import_throw_sparse8_inputs));
} // namespace ML
|
052e5869d9b60a9be1dad8b557ef7d4c7818d7b5.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/fil/fil.h>
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <treelite/c_api.h>
#include <treelite/frontend.h>
#include <treelite/tree.h>
#include <cmath>
#include <cstdio>
#include <limits>
#include <memory>
#include <numeric>
#include <ostream>
#include <raft/cuda_utils.cuh>
#include <raft/random/rng.cuh>
#include <utility>
#include "../../src/fil/internal.cuh"
#define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error")
namespace ML {
namespace tl = treelite;
namespace tlf = treelite::frontend;
using namespace fil;
struct FilTestParams {
// input data parameters
int num_rows = 20'000;
int num_cols = 50;
float nan_prob = 0.05;
// forest parameters
int depth = 8;
int num_trees = 50;
float leaf_prob = 0.05;
// output parameters
output_t output = output_t::RAW;
float threshold = 0.0f;
float global_bias = 0.0f;
// runtime parameters
int blocks_per_sm = 0;
int threads_per_tree = 1;
int n_items = 0;
algo_t algo = algo_t::NAIVE;
int seed = 42;
float tolerance = 2e-3f;
bool print_forest_shape = false;
// treelite parameters, only used for treelite tests
tl::Operator op = tl::Operator::kLT;
leaf_algo_t leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY;
// when FLOAT_UNARY_BINARY == leaf_algo:
// num_classes = 1 means it's regression
// num_classes = 2 means it's binary classification
// (complement probabilities, then use threshold)
// when GROVE_PER_CLASS == leaf_algo:
// it's multiclass classification (num_classes must be > 2),
// done by splitting the forest in num_classes groups,
// each of which computes one-vs-all probability for its class.
// when CATEGORICAL_LEAF == leaf_algo:
// num_classes must be > 1 and it's multiclass classification.
// done by storing the class label in each leaf and voting.
// it's used in treelite ModelBuilder initialization
int num_classes = 1;
size_t num_proba_outputs() { return num_rows * std::max(num_classes, 2); }
size_t num_preds_outputs() { return num_rows; }
};
std::string output2str(fil::output_t output)
{
if (output == fil::RAW) return "RAW";
std::string s = "";
if (output & fil::AVG) s += "| AVG";
if (output & fil::CLASS) s += "| CLASS";
if (output & fil::SIGMOID) s += "| SIGMOID";
if (output & fil::SOFTMAX) s += "| SOFTMAX";
return s;
}
std::ostream& operator<<(std::ostream& os, const FilTestParams& ps)
{
os << "num_rows = " << ps.num_rows << ", num_cols = " << ps.num_cols
<< ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth
<< ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob
<< ", output = " << output2str(ps.output) << ", threshold = " << ps.threshold
<< ", threads_per_tree = " << ps.threads_per_tree << ", n_items = " << ps.n_items
<< ", blocks_per_sm = " << ps.blocks_per_sm << ", algo = " << ps.algo << ", seed = " << ps.seed
<< ", tolerance = " << ps.tolerance << ", op = " << tl::OpName(ps.op)
<< ", global_bias = " << ps.global_bias << ", leaf_algo = " << ps.leaf_algo
<< ", num_classes = " << ps.num_classes;
return os;
}
__global__ void nan_kernel(float* data, const bool* mask, int len, float nan)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= len) return;
if (!mask[tid]) data[tid] = nan;
}
float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); }
class BaseFilTest : public testing::TestWithParam<FilTestParams> {
protected:
void setup_helper()
{
// setup
ps = testing::TestWithParam<FilTestParams>::GetParam();
CUDA_CHECK(cudaStreamCreate(&stream));
handle.set_stream(stream);
generate_forest();
generate_data();
predict_on_cpu();
predict_on_gpu();
}
void SetUp() override { setup_helper(); }
void TearDown() override
{
CUDA_CHECK(cudaFree(preds_d));
CUDA_CHECK(cudaFree(want_preds_d));
CUDA_CHECK(cudaFree(data_d));
CUDA_CHECK(cudaFree(want_proba_d));
CUDA_CHECK(cudaFree(proba_d));
}
void generate_forest()
{
size_t num_nodes = forest_num_nodes();
// helper data
/// weights, used as float* or int*
int* weights_d = nullptr;
float* thresholds_d = nullptr;
int* fids_d = nullptr;
bool* def_lefts_d = nullptr;
bool* is_leafs_d = nullptr;
bool* def_lefts_h = nullptr;
bool* is_leafs_h = nullptr;
// allocate GPU data
raft::allocate(weights_d, num_nodes);
// sizeof(float) == sizeof(int)
raft::allocate(thresholds_d, num_nodes);
raft::allocate(fids_d, num_nodes);
raft::allocate(def_lefts_d, num_nodes);
raft::allocate(is_leafs_d, num_nodes);
// generate on-GPU random data
raft::random::Rng r(ps.seed);
if (ps.leaf_algo == fil::leaf_algo_t::CATEGORICAL_LEAF) {
// [0..num_classes)
r.uniformInt((int*)weights_d, num_nodes, 0, ps.num_classes, stream);
} else if (ps.leaf_algo == fil::leaf_algo_t::VECTOR_LEAF) {
std::mt19937 gen(3);
std::uniform_real_distribution<> dist(0, 1);
vector_leaf.resize(num_nodes * ps.num_classes);
for (size_t i = 0; i < vector_leaf.size(); i++) {
vector_leaf[i] = dist(gen);
}
// Normalise probabilities to 1
for (size_t i = 0; i < vector_leaf.size(); i += ps.num_classes) {
auto sum = std::accumulate(&vector_leaf[i], &vector_leaf[i + ps.num_classes], 0.0f);
for (size_t j = i; j < i + ps.num_classes; j++) {
vector_leaf[j] /= sum;
}
}
} else {
r.uniform((float*)weights_d, num_nodes, -1.0f, 1.0f, stream);
}
r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream);
r.uniformInt(fids_d, num_nodes, 0, ps.num_cols, stream);
r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream);
r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream);
// copy data to host
std::vector<float> thresholds_h(num_nodes);
std::vector<int> weights_h(num_nodes), fids_h(num_nodes);
def_lefts_h = new bool[num_nodes];
is_leafs_h = new bool[num_nodes];
raft::update_host(weights_h.data(), (int*)weights_d, num_nodes, stream);
raft::update_host(thresholds_h.data(), thresholds_d, num_nodes, stream);
raft::update_host(fids_h.data(), fids_d, num_nodes, stream);
raft::update_host(def_lefts_h, def_lefts_d, num_nodes, stream);
raft::update_host(is_leafs_h, is_leafs_d, num_nodes, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
// mark leaves
for (size_t i = 0; i < ps.num_trees; ++i) {
int num_tree_nodes = tree_num_nodes();
size_t leaf_start = num_tree_nodes * i + num_tree_nodes / 2;
size_t leaf_end = num_tree_nodes * (i + 1);
for (size_t j = leaf_start; j < leaf_end; ++j) {
is_leafs_h[j] = true;
}
}
// initialize nodes
nodes.resize(num_nodes);
for (size_t i = 0; i < num_nodes; ++i) {
fil::val_t w;
switch (ps.leaf_algo) {
case fil::leaf_algo_t::CATEGORICAL_LEAF: w.idx = weights_h[i]; break;
case fil::leaf_algo_t::FLOAT_UNARY_BINARY:
case fil::leaf_algo_t::GROVE_PER_CLASS:
// not relying on fil::val_t internals
// merely that we copied floats into weights_h earlier
std::memcpy(&w.f, &weights_h[i], sizeof w.f);
break;
case fil::leaf_algo_t::VECTOR_LEAF: w.idx = i; break;
default: ASSERT(false, "internal error: invalid ps.leaf_algo");
}
nodes[i] = fil::dense_node(w, thresholds_h[i], fids_h[i], def_lefts_h[i], is_leafs_h[i]);
}
// clean up
delete[] def_lefts_h;
delete[] is_leafs_h;
CUDA_CHECK(cudaFree(is_leafs_d));
CUDA_CHECK(cudaFree(def_lefts_d));
CUDA_CHECK(cudaFree(fids_d));
CUDA_CHECK(cudaFree(thresholds_d));
CUDA_CHECK(cudaFree(weights_d));
}
void generate_data()
{
// allocate arrays
size_t num_data = ps.num_rows * ps.num_cols;
raft::allocate(data_d, num_data);
bool* mask_d = nullptr;
raft::allocate(mask_d, num_data);
// generate random data
raft::random::Rng r(ps.seed);
r.uniform(data_d, num_data, -1.0f, 1.0f, stream);
r.bernoulli(mask_d, num_data, ps.nan_prob, stream);
int tpb = 256;
nan_kernel<<<raft::ceildiv(int(num_data), tpb), tpb, 0, stream>>>(
data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN());
CUDA_CHECK(cudaPeekAtLastError());
// copy to host
data_h.resize(num_data);
raft::update_host(data_h.data(), data_d, num_data, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
// clean up
CUDA_CHECK(cudaFree(mask_d));
}
void apply_softmax(float* class_scores)
{
float max = *std::max_element(class_scores, &class_scores[ps.num_classes]);
for (int i = 0; i < ps.num_classes; ++i)
class_scores[i] = expf(class_scores[i] - max);
float sum = std::accumulate(class_scores, &class_scores[ps.num_classes], 0.0f);
for (int i = 0; i < ps.num_classes; ++i)
class_scores[i] /= sum;
}
void transform(float f, float& proba, float& output)
{
if ((ps.output & fil::output_t::AVG) != 0) {
if (ps.leaf_algo == fil::leaf_algo_t::GROVE_PER_CLASS) {
f /= ps.num_trees / ps.num_classes;
} else {
f *= 1.0f / ps.num_trees;
}
}
f += ps.global_bias;
if ((ps.output & fil::output_t::SIGMOID) != 0) { f = sigmoid(f); }
proba = f;
if ((ps.output & fil::output_t::CLASS) != 0) { f = f > ps.threshold ? 1.0f : 0.0f; }
output = f;
}
void complement(float* proba) { proba[0] = 1.0f - proba[1]; }
void predict_on_cpu()
{
// predict on host
std::vector<float> want_preds_h(ps.num_preds_outputs());
std::vector<float> want_proba_h(ps.num_proba_outputs());
int num_nodes = tree_num_nodes();
std::vector<float> class_scores(ps.num_classes);
switch (ps.leaf_algo) {
case fil::leaf_algo_t::FLOAT_UNARY_BINARY:
for (int i = 0; i < ps.num_rows; ++i) {
float pred = 0.0f;
for (int j = 0; j < ps.num_trees; ++j) {
pred += infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.num_cols]).f;
}
transform(pred, want_proba_h[i * 2 + 1], want_preds_h[i]);
complement(&(want_proba_h[i * 2]));
}
break;
case fil::leaf_algo_t::GROVE_PER_CLASS:
for (int row = 0; row < ps.num_rows; ++row) {
std::fill(class_scores.begin(), class_scores.end(), 0.0f);
for (int tree = 0; tree < ps.num_trees; ++tree) {
class_scores[tree % ps.num_classes] +=
infer_one_tree(&nodes[tree * num_nodes], &data_h[row * ps.num_cols]).f;
}
want_preds_h[row] =
std::max_element(class_scores.begin(), class_scores.end()) - class_scores.begin();
for (int c = 0; c < ps.num_classes; ++c) {
float thresholded_proba; // not used;
transform(class_scores[c], want_proba_h[row * ps.num_classes + c], thresholded_proba);
}
if ((ps.output & fil::output_t::SOFTMAX) != 0)
apply_softmax(&want_proba_h[row * ps.num_classes]);
}
break;
case fil::leaf_algo_t::CATEGORICAL_LEAF: {
std::vector<int> class_votes(ps.num_classes);
for (int r = 0; r < ps.num_rows; ++r) {
std::fill(class_votes.begin(), class_votes.end(), 0);
for (int j = 0; j < ps.num_trees; ++j) {
int class_label = infer_one_tree(&nodes[j * num_nodes], &data_h[r * ps.num_cols]).idx;
++class_votes[class_label];
}
for (int c = 0; c < ps.num_classes; ++c) {
float thresholded_proba; // not used; do argmax instead
transform(class_votes[c], want_proba_h[r * ps.num_classes + c], thresholded_proba);
}
want_preds_h[r] =
std::max_element(class_votes.begin(), class_votes.end()) - class_votes.begin();
}
break;
}
case fil::leaf_algo_t::VECTOR_LEAF:
for (int r = 0; r < ps.num_rows; ++r) {
std::vector<float> class_probabilities(ps.num_classes);
for (int j = 0; j < ps.num_trees; ++j) {
int vector_index = infer_one_tree(&nodes[j * num_nodes], &data_h[r * ps.num_cols]).idx;
float sum = 0.0;
for (int k = 0; k < ps.num_classes; k++) {
class_probabilities[k] += vector_leaf[vector_index * ps.num_classes + k];
sum += vector_leaf[vector_index * ps.num_classes + k];
}
ASSERT_LE(std::abs(sum - 1.0f), 1e-5);
}
for (int c = 0; c < ps.num_classes; ++c) {
want_proba_h[r * ps.num_classes + c] = class_probabilities[c];
}
want_preds_h[r] =
std::max_element(class_probabilities.begin(), class_probabilities.end()) -
class_probabilities.begin();
}
break;
}
// copy to GPU
raft::allocate(want_preds_d, ps.num_preds_outputs());
raft::allocate(want_proba_d, ps.num_proba_outputs());
raft::update_device(want_preds_d, want_preds_h.data(), ps.num_preds_outputs(), stream);
raft::update_device(want_proba_d, want_proba_h.data(), ps.num_proba_outputs(), stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
virtual void init_forest(fil::forest_t* pforest) = 0;
void predict_on_gpu()
{
fil::forest_t forest = nullptr;
init_forest(&forest);
// predict
raft::allocate(preds_d, ps.num_preds_outputs());
raft::allocate(proba_d, ps.num_proba_outputs());
fil::predict(handle, forest, preds_d, data_d, ps.num_rows);
fil::predict(handle, forest, proba_d, data_d, ps.num_rows, true);
CUDA_CHECK(cudaStreamSynchronize(stream));
// cleanup
fil::free(handle, forest);
}
void compare()
{
ASSERT_TRUE(raft::devArrMatch(want_proba_d,
proba_d,
ps.num_proba_outputs(),
raft::CompareApprox<float>(ps.tolerance),
stream));
float tolerance = ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY
? ps.tolerance
: std::numeric_limits<float>::epsilon();
// in multi-class prediction, floats represent the most likely class
// and would be generated by converting an int to float
ASSERT_TRUE(raft::devArrMatch(
want_preds_d, preds_d, ps.num_rows, raft::CompareApprox<float>(tolerance), stream));
}
fil::val_t infer_one_tree(fil::dense_node* root, float* data)
{
int curr = 0;
fil::val_t output{.f = 0.0f};
for (;;) {
const fil::dense_node& node = root[curr];
if (node.is_leaf()) return node.base_node::output<val_t>();
float val = data[node.fid()];
bool cond = isnan(val) ? !node.def_left() : val >= node.thresh();
curr = (curr << 1) + 1 + (cond ? 1 : 0);
}
return output;
}
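  // Note on the traversal above: dense nodes are stored in implicit
  // complete-binary-tree order, so node i has children 2*i+1 (left) and
  // 2*i+2 (right), which is what curr = (curr << 1) + 1 + cond computes;
  // e.g. the root 0 steps to 1 or 2, and node 2 steps to 5 or 6. cond
  // selects the right child when val >= thresh (or when val is NaN and
  // the default direction is not left).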
int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; }
int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; }
// predictions
float* preds_d = nullptr;
float* proba_d = nullptr;
float* want_preds_d = nullptr;
float* want_proba_d = nullptr;
// input data
float* data_d = nullptr;
std::vector<float> data_h;
// forest data
std::vector<fil::dense_node> nodes;
std::vector<float> vector_leaf;
// parameters
cudaStream_t stream;
raft::handle_t handle;
FilTestParams ps;
};
class PredictDenseFilTest : public BaseFilTest {
protected:
void init_forest(fil::forest_t* pforest) override
{
// init FIL model
fil::forest_params_t fil_ps;
fil_ps.depth = ps.depth;
fil_ps.num_trees = ps.num_trees;
fil_ps.num_cols = ps.num_cols;
fil_ps.algo = ps.algo;
fil_ps.output = ps.output;
fil_ps.threshold = ps.threshold;
fil_ps.global_bias = ps.global_bias;
fil_ps.leaf_algo = ps.leaf_algo;
fil_ps.num_classes = ps.num_classes;
fil_ps.blocks_per_sm = ps.blocks_per_sm;
fil_ps.threads_per_tree = ps.threads_per_tree;
fil_ps.n_items = ps.n_items;
fil::init_dense(handle, pforest, nodes.data(), &fil_ps, vector_leaf);
}
};
template <typename fil_node_t>
class BasePredictSparseFilTest : public BaseFilTest {
protected:
void dense2sparse_node(const fil::dense_node* dense_root,
int i_dense,
int i_sparse_root,
int i_sparse)
{
const fil::dense_node& node = dense_root[i_dense];
if (node.is_leaf()) {
// leaf sparse node
sparse_nodes[i_sparse] = fil_node_t(node.base_node::output<val_t>(),
node.thresh(),
node.fid(),
node.def_left(),
node.is_leaf(),
0);
return;
}
// inner sparse node
// reserve space for children
int left_index = sparse_nodes.size();
sparse_nodes.push_back(fil_node_t());
sparse_nodes.push_back(fil_node_t());
sparse_nodes[i_sparse] = fil_node_t(node.base_node::output<val_t>(),
node.thresh(),
node.fid(),
node.def_left(),
node.is_leaf(),
left_index - i_sparse_root);
dense2sparse_node(dense_root, 2 * i_dense + 1, i_sparse_root, left_index);
dense2sparse_node(dense_root, 2 * i_dense + 2, i_sparse_root, left_index + 1);
}
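  // Layout note for the conversion above: the two children of an inner
  // sparse node occupy adjacent slots in sparse_nodes; each node stores
  // only the offset of its left child relative to the tree root
  // (left_index - i_sparse_root), and the right child sits implicitly in
  // the next slot.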
void dense2sparse_tree(const fil::dense_node* dense_root)
{
int i_sparse_root = sparse_nodes.size();
sparse_nodes.push_back(fil_node_t());
dense2sparse_node(dense_root, 0, i_sparse_root, i_sparse_root);
trees.push_back(i_sparse_root);
}
void dense2sparse()
{
for (int tree = 0; tree < ps.num_trees; ++tree) {
dense2sparse_tree(&nodes[tree * tree_num_nodes()]);
}
}
void init_forest(fil::forest_t* pforest) override
{
// init FIL model
fil::forest_params_t fil_params;
fil_params.num_trees = ps.num_trees;
fil_params.num_cols = ps.num_cols;
fil_params.algo = ps.algo;
fil_params.output = ps.output;
fil_params.threshold = ps.threshold;
fil_params.global_bias = ps.global_bias;
fil_params.leaf_algo = ps.leaf_algo;
fil_params.num_classes = ps.num_classes;
fil_params.blocks_per_sm = ps.blocks_per_sm;
fil_params.threads_per_tree = ps.threads_per_tree;
fil_params.n_items = ps.n_items;
dense2sparse();
fil_params.num_nodes = sparse_nodes.size();
fil::init_sparse(handle, pforest, trees.data(), sparse_nodes.data(), &fil_params, vector_leaf);
}
std::vector<fil_node_t> sparse_nodes;
std::vector<int> trees;
};
typedef BasePredictSparseFilTest<fil::sparse_node16> PredictSparse16FilTest;
typedef BasePredictSparseFilTest<fil::sparse_node8> PredictSparse8FilTest;
class TreeliteFilTest : public BaseFilTest {
protected:
  /** adds nodes[node] of the tree starting at index root to the builder
      under the key *pkey, increments *pkey,
      and returns the treelite key of the node */
int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root, int node)
{
int key = (*pkey)++;
builder->CreateNode(key);
const fil::dense_node& dense_node = nodes[node];
if (dense_node.is_leaf()) {
switch (ps.leaf_algo) {
case fil::leaf_algo_t::FLOAT_UNARY_BINARY:
case fil::leaf_algo_t::GROVE_PER_CLASS:
// default is fil::FLOAT_UNARY_BINARY
builder->SetLeafNode(key, tlf::Value::Create(dense_node.base_node::output<val_t>().f));
break;
case fil::leaf_algo_t::CATEGORICAL_LEAF: {
std::vector<tlf::Value> vec(ps.num_classes);
for (int i = 0; i < ps.num_classes; ++i) {
vec[i] = tlf::Value::Create(i == dense_node.template output<val_t>().idx ? 1.0f : 0.0f);
}
builder->SetLeafVectorNode(key, vec);
break;
}
case fil::leaf_algo_t::VECTOR_LEAF: {
std::vector<tlf::Value> vec(ps.num_classes);
for (int i = 0; i < ps.num_classes; ++i) {
auto idx = dense_node.template output<val_t>().idx;
vec[i] = tlf::Value::Create(vector_leaf[idx * ps.num_classes + i]);
}
builder->SetLeafVectorNode(key, vec);
break;
}
}
} else {
int left = root + 2 * (node - root) + 1;
int right = root + 2 * (node - root) + 2;
float threshold = dense_node.thresh();
bool default_left = dense_node.def_left();
switch (ps.op) {
case tl::Operator::kLT: break;
case tl::Operator::kLE:
// adjust the threshold
threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity());
break;
      case tl::Operator::kGT:
        // adjust the threshold; left and right still need to be swapped,
        // so fall through into the kGE case below
        threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity());
case tl::Operator::kGE:
// swap left and right
std::swap(left, right);
default_left = !default_left;
break;
default: ASSERT(false, "comparison operator must be <, >, <= or >=");
}
int left_key = node_to_treelite(builder, pkey, root, left);
int right_key = node_to_treelite(builder, pkey, root, right);
builder->SetNumericalTestNode(key,
dense_node.fid(),
ps.op,
tlf::Value::Create(threshold),
default_left,
left_key,
right_key);
}
return key;
}
void init_forest_impl(fil::forest_t* pforest, fil::storage_type_t storage_type)
{
bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0;
int treelite_num_classes =
ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY ? 1 : ps.num_classes;
std::unique_ptr<tlf::ModelBuilder> model_builder(new tlf::ModelBuilder(ps.num_cols,
treelite_num_classes,
random_forest_flag,
tl::TypeInfo::kFloat32,
tl::TypeInfo::kFloat32));
// prediction transform
if ((ps.output & fil::output_t::SIGMOID) != 0) {
if (ps.num_classes > 2)
model_builder->SetModelParam("pred_transform", "multiclass_ova");
else
model_builder->SetModelParam("pred_transform", "sigmoid");
} else if (ps.leaf_algo != fil::leaf_algo_t::FLOAT_UNARY_BINARY) {
model_builder->SetModelParam("pred_transform", "max_index");
ps.output = fil::output_t(ps.output | fil::output_t::CLASS);
} else if (ps.leaf_algo == GROVE_PER_CLASS) {
model_builder->SetModelParam("pred_transform", "identity_multiclass");
} else {
model_builder->SetModelParam("pred_transform", "identity");
}
// global bias
char* global_bias_str = nullptr;
ASSERT(asprintf(&global_bias_str, "%f", double(ps.global_bias)) > 0,
"cannot convert global_bias into a string");
model_builder->SetModelParam("global_bias", global_bias_str);
::free(global_bias_str);
// build the trees
for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) {
tlf::TreeBuilder* tree_builder =
new tlf::TreeBuilder(tl::TypeInfo::kFloat32, tl::TypeInfo::kFloat32);
int key_counter = 0;
int root = i_tree * tree_num_nodes();
int root_key = node_to_treelite(tree_builder, &key_counter, root, root);
tree_builder->SetRootNode(root_key);
// InsertTree() consumes tree_builder
TL_CPP_CHECK(model_builder->InsertTree(tree_builder));
}
// commit the model
std::unique_ptr<tl::Model> model = model_builder->CommitModel();
// init FIL forest with the model
char* forest_shape_str = nullptr;
fil::treelite_params_t params;
params.algo = ps.algo;
params.threshold = ps.threshold;
params.output_class = (ps.output & fil::output_t::CLASS) != 0;
params.storage_type = storage_type;
params.blocks_per_sm = ps.blocks_per_sm;
params.threads_per_tree = ps.threads_per_tree;
params.n_items = ps.n_items;
params.pforest_shape_str = ps.print_forest_shape ? &forest_shape_str : nullptr;
fil::from_treelite(handle, pforest, (ModelHandle)model.get(), ¶ms);
CUDA_CHECK(cudaStreamSynchronize(stream));
if (ps.print_forest_shape) {
std::string str(forest_shape_str);
for (const char* substr : {"model size",
" MB",
"Depth histogram:",
"Avg nodes per tree",
"Leaf depth",
"Depth histogram fingerprint"}) {
ASSERT(str.find(substr) != std::string::npos,
"\"%s\" not found in forest shape :\n%s",
substr,
str.c_str());
}
}
::free(forest_shape_str);
}
};
class TreeliteDenseFilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override
{
init_forest_impl(pforest, fil::storage_type_t::DENSE);
}
};
class TreeliteSparse16FilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override
{
init_forest_impl(pforest, fil::storage_type_t::SPARSE);
}
};
class TreeliteSparse8FilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override
{
init_forest_impl(pforest, fil::storage_type_t::SPARSE8);
}
};
class TreeliteAutoFilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override
{
init_forest_impl(pforest, fil::storage_type_t::AUTO);
}
};
// test for failures; currently only supported for sparse8 nodes
class TreeliteThrowSparse8FilTest : public TreeliteSparse8FilTest {
protected:
// model import happens in check(), so this function is empty
void SetUp() override {}
void check() { ASSERT_THROW(setup_helper(), raft::exception); }
};
/** mechanism to use named aggregate initialization before C++20, and also use
the struct defaults. Using it directly only works if all defaulted
members come after ones explicitly mentioned.
**/
#define FIL_TEST_PARAMS(...) \
[]() { \
struct NonDefaultFilTestParams : public FilTestParams { \
NonDefaultFilTestParams() { __VA_ARGS__; } \
}; \
return FilTestParams(NonDefaultFilTestParams()); \
}()
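// For illustration only (not one of the test inputs): a call such as
//   FIL_TEST_PARAMS(depth = 10, algo = ALGO_AUTO)
// expands, roughly, to an immediately-invoked lambda that defines
//   struct NonDefaultFilTestParams : public FilTestParams {
//     NonDefaultFilTestParams() { depth = 10; algo = ALGO_AUTO; }
//   };
// and returns it sliced back to FilTestParams, so every member that is
// not mentioned keeps its default from the base struct.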
// kEQ is intentionally unused, and kLT is default
static const tl::Operator kLE = tl::Operator::kLE;
static const tl::Operator kGT = tl::Operator::kGT;
static const tl::Operator kGE = tl::Operator::kGE;
std::vector<FilTestParams> predict_dense_inputs = {
FIL_TEST_PARAMS(),
FIL_TEST_PARAMS(algo = TREE_REORG),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID),
FIL_TEST_PARAMS(output = SIGMOID, algo = TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(output = AVG),
FIL_TEST_PARAMS(output = AVG, algo = TREE_REORG),
FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
FIL_TEST_PARAMS(output = AVG_CLASS, algo = TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(global_bias = 0.5, algo = TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = AVG, global_bias = 0.5),
FIL_TEST_PARAMS(
output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, algo = TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO),
FIL_TEST_PARAMS(
output = AVG_CLASS, algo = BATCH_TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
FIL_TEST_PARAMS(algo = TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(output = SIGMOID, leaf_algo = CATEGORICAL_LEAF, num_classes = 7),
FIL_TEST_PARAMS(
global_bias = 0.5, algo = TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 4),
FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, leaf_algo = CATEGORICAL_LEAF, num_classes = 4),
FIL_TEST_PARAMS(
output = AVG_CLASS, algo = BATCH_TREE_REORG, leaf_algo = GROVE_PER_CLASS, num_classes = 5),
FIL_TEST_PARAMS(algo = TREE_REORG, leaf_algo = GROVE_PER_CLASS, num_classes = 5),
FIL_TEST_PARAMS(num_trees = 49, output = SIGMOID, leaf_algo = GROVE_PER_CLASS, num_classes = 7),
FIL_TEST_PARAMS(num_trees = 52,
global_bias = 0.5,
algo = TREE_REORG,
leaf_algo = GROVE_PER_CLASS,
num_classes = 4),
FIL_TEST_PARAMS(
num_trees = 52, output = AVG, global_bias = 0.5, leaf_algo = GROVE_PER_CLASS, num_classes = 4),
FIL_TEST_PARAMS(blocks_per_sm = 1),
FIL_TEST_PARAMS(blocks_per_sm = 4),
FIL_TEST_PARAMS(num_classes = 3, blocks_per_sm = 1, leaf_algo = CATEGORICAL_LEAF),
FIL_TEST_PARAMS(num_classes = 3, blocks_per_sm = 4, leaf_algo = CATEGORICAL_LEAF),
FIL_TEST_PARAMS(num_classes = 5, blocks_per_sm = 1, leaf_algo = GROVE_PER_CLASS),
FIL_TEST_PARAMS(num_classes = 5, blocks_per_sm = 4, leaf_algo = GROVE_PER_CLASS),
FIL_TEST_PARAMS(
leaf_algo = GROVE_PER_CLASS, blocks_per_sm = 1, num_trees = 512, num_classes = 512),
FIL_TEST_PARAMS(
leaf_algo = GROVE_PER_CLASS, blocks_per_sm = 4, num_trees = 512, num_classes = 512),
FIL_TEST_PARAMS(num_trees = 52, output = SOFTMAX, leaf_algo = GROVE_PER_CLASS, num_classes = 4),
FIL_TEST_PARAMS(
num_trees = 52, output = AVG_SOFTMAX, leaf_algo = GROVE_PER_CLASS, num_classes = 4),
FIL_TEST_PARAMS(num_trees = 3 * (FIL_TPB + 1),
output = SOFTMAX,
leaf_algo = GROVE_PER_CLASS,
num_classes = FIL_TPB + 1),
FIL_TEST_PARAMS(num_trees = 3 * (FIL_TPB + 1),
output = AVG_SOFTMAX,
leaf_algo = GROVE_PER_CLASS,
num_classes = FIL_TPB + 1),
FIL_TEST_PARAMS(num_cols = 100'000, depth = 5, num_trees = 1, leaf_algo = FLOAT_UNARY_BINARY),
FIL_TEST_PARAMS(num_rows = 101,
num_cols = 100'000,
depth = 5,
num_trees = 9,
algo = BATCH_TREE_REORG,
leaf_algo = GROVE_PER_CLASS,
num_classes = 3),
FIL_TEST_PARAMS(num_rows = 102,
num_cols = 100'000,
depth = 5,
num_trees = 3 * (FIL_TPB + 1),
algo = BATCH_TREE_REORG,
leaf_algo = GROVE_PER_CLASS,
num_classes = FIL_TPB + 1),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 100'000,
depth = 5,
num_trees = 1,
algo = BATCH_TREE_REORG,
leaf_algo = CATEGORICAL_LEAF,
num_classes = 3),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, threads_per_tree = 2),
FIL_TEST_PARAMS(algo = NAIVE, threads_per_tree = 4),
FIL_TEST_PARAMS(algo = TREE_REORG, threads_per_tree = 8),
FIL_TEST_PARAMS(algo = ALGO_AUTO, threads_per_tree = 16),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, threads_per_tree = 32),
FIL_TEST_PARAMS(algo = NAIVE, threads_per_tree = 64),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, threads_per_tree = 128, n_items = 3),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, threads_per_tree = 256),
FIL_TEST_PARAMS(algo = TREE_REORG, threads_per_tree = 32, n_items = 1),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, threads_per_tree = 16, n_items = 4),
FIL_TEST_PARAMS(algo = NAIVE, threads_per_tree = 32, n_items = 4),
FIL_TEST_PARAMS(
num_rows = 500, num_cols = 2000, algo = BATCH_TREE_REORG, threads_per_tree = 64, n_items = 4),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_classes = 2),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_trees = 9, num_classes = 20),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 100'000,
depth = 5,
num_trees = 1,
algo = BATCH_TREE_REORG,
leaf_algo = VECTOR_LEAF,
num_classes = 3),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 5,
depth = 5,
num_trees = 3,
leaf_algo = VECTOR_LEAF,
num_classes = 4000),
};
TEST_P(PredictDenseFilTest, Predict) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, PredictDenseFilTest, testing::ValuesIn(predict_dense_inputs));
std::vector<FilTestParams> predict_sparse_inputs = {
FIL_TEST_PARAMS(),
FIL_TEST_PARAMS(output = SIGMOID),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, num_classes = 2),
FIL_TEST_PARAMS(output = AVG),
FIL_TEST_PARAMS(output = AVG_CLASS, global_bias = 0.5, num_classes = 2),
FIL_TEST_PARAMS(global_bias = 0.5),
FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5),
FIL_TEST_PARAMS(output = AVG, global_bias = 0.5),
FIL_TEST_PARAMS(output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = ALGO_AUTO, num_classes = 2),
FIL_TEST_PARAMS(output = AVG_CLASS,
threshold = 1.0,
global_bias = 0.5,
leaf_algo = CATEGORICAL_LEAF,
num_classes = 5000),
FIL_TEST_PARAMS(global_bias = 0.5, leaf_algo = CATEGORICAL_LEAF, num_classes = 6),
FIL_TEST_PARAMS(output = CLASS, leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
FIL_TEST_PARAMS(leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
FIL_TEST_PARAMS(depth = 2,
num_trees = 5000,
output = AVG_CLASS,
threshold = 1.0,
global_bias = 0.5,
leaf_algo = GROVE_PER_CLASS,
num_classes = 5000),
FIL_TEST_PARAMS(num_trees = 60, global_bias = 0.5, leaf_algo = GROVE_PER_CLASS, num_classes = 6),
FIL_TEST_PARAMS(num_trees = 51, output = CLASS, leaf_algo = GROVE_PER_CLASS, num_classes = 3),
FIL_TEST_PARAMS(num_trees = 51, leaf_algo = GROVE_PER_CLASS, num_classes = 3),
FIL_TEST_PARAMS(algo = NAIVE, threads_per_tree = 2),
FIL_TEST_PARAMS(algo = NAIVE, threads_per_tree = 8, n_items = 1),
FIL_TEST_PARAMS(algo = ALGO_AUTO, threads_per_tree = 16, n_items = 1),
FIL_TEST_PARAMS(algo = ALGO_AUTO, threads_per_tree = 32),
FIL_TEST_PARAMS(num_cols = 1, num_trees = 1, algo = NAIVE, threads_per_tree = 64, n_items = 1),
FIL_TEST_PARAMS(num_rows = 500, num_cols = 2000, algo = NAIVE, threads_per_tree = 64),
FIL_TEST_PARAMS(
num_rows = 500, num_cols = 2000, algo = ALGO_AUTO, threads_per_tree = 256, n_items = 1),
FIL_TEST_PARAMS(num_trees = 51, leaf_algo = VECTOR_LEAF, num_classes = 15),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_trees = 9, num_classes = 20),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 1000,
depth = 5,
num_trees = 1,
leaf_algo = VECTOR_LEAF,
num_classes = 3),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 5,
depth = 5,
num_trees = 3,
leaf_algo = VECTOR_LEAF,
num_classes = 4000),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 5,
depth = 5,
num_trees = 530,
leaf_algo = VECTOR_LEAF,
num_classes = 11),
FIL_TEST_PARAMS(num_rows = 103,
num_cols = 5,
depth = 5,
num_trees = 530,
leaf_algo = VECTOR_LEAF,
num_classes = 1111),
};
TEST_P(PredictSparse16FilTest, Predict) { compare(); }
// Temporarily disabled, see https://github.com/rapidsai/cuml/issues/3205
INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse16FilTest, testing::ValuesIn(predict_sparse_inputs));
TEST_P(PredictSparse8FilTest, Predict) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse8FilTest, testing::ValuesIn(predict_sparse_inputs));
std::vector<FilTestParams> import_dense_inputs = {
FIL_TEST_PARAMS(),
FIL_TEST_PARAMS(output = SIGMOID, op = kLE),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, op = kGT, num_classes = 2),
FIL_TEST_PARAMS(output = AVG, op = kGE),
FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
FIL_TEST_PARAMS(algo = TREE_REORG, op = kLE),
FIL_TEST_PARAMS(output = SIGMOID, algo = TREE_REORG, op = kGT),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = TREE_REORG, op = kGE, num_classes = 2),
FIL_TEST_PARAMS(output = AVG, algo = TREE_REORG),
FIL_TEST_PARAMS(output = AVG_CLASS, algo = TREE_REORG, op = kLE, num_classes = 2),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kLE),
FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kLE),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kGT),
FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kGT),
FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kGE),
FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kGE),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, op = kLE, num_classes = 2),
FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG, op = kLE),
FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, op = kGT, num_classes = 2),
FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, op = kGE, num_classes = 2),
FIL_TEST_PARAMS(global_bias = 0.5, algo = TREE_REORG),
FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, algo = BATCH_TREE_REORG, op = kLE),
FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, op = kGT),
FIL_TEST_PARAMS(output = AVG_CLASS,
threshold = 1.0,
global_bias = 0.5,
algo = TREE_REORG,
op = kGE,
num_classes = 2),
FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO, op = kLE),
FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO, op = kLE),
FIL_TEST_PARAMS(
output = AVG, algo = BATCH_TREE_REORG, op = kGE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(
output = AVG, algo = BATCH_TREE_REORG, op = kGT, leaf_algo = CATEGORICAL_LEAF, num_classes = 6),
FIL_TEST_PARAMS(
output = AVG, algo = BATCH_TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
FIL_TEST_PARAMS(
output = AVG, algo = BATCH_TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(
output = AVG_CLASS, algo = TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(
output = AVG, algo = TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 7),
FIL_TEST_PARAMS(output = AVG, leaf_algo = CATEGORICAL_LEAF, num_classes = 6),
FIL_TEST_PARAMS(output = CLASS,
algo = BATCH_TREE_REORG,
op = kGE,
leaf_algo = GROVE_PER_CLASS,
num_classes = 5),
FIL_TEST_PARAMS(num_trees = 48,
output = CLASS,
algo = BATCH_TREE_REORG,
op = kGT,
leaf_algo = GROVE_PER_CLASS,
num_classes = 6),
FIL_TEST_PARAMS(num_trees = 51,
output = CLASS,
algo = BATCH_TREE_REORG,
op = kLE,
leaf_algo = GROVE_PER_CLASS,
num_classes = 3),
FIL_TEST_PARAMS(output = CLASS,
algo = BATCH_TREE_REORG,
op = kLE,
leaf_algo = GROVE_PER_CLASS,
num_classes = 5),
FIL_TEST_PARAMS(
output = CLASS, algo = TREE_REORG, op = kLE, leaf_algo = GROVE_PER_CLASS, num_classes = 5),
FIL_TEST_PARAMS(num_trees = 49,
output = CLASS,
algo = TREE_REORG,
op = kLE,
leaf_algo = GROVE_PER_CLASS,
num_classes = 7),
FIL_TEST_PARAMS(num_trees = 48, output = CLASS, leaf_algo = GROVE_PER_CLASS, num_classes = 6),
FIL_TEST_PARAMS(print_forest_shape = true),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_classes = 2),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_trees = 19, num_classes = 20),
};
TEST_P(TreeliteDenseFilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteDenseFilTest, testing::ValuesIn(import_dense_inputs));
std::vector<FilTestParams> import_sparse_inputs = {
FIL_TEST_PARAMS(),
FIL_TEST_PARAMS(output = SIGMOID, op = kLE),
FIL_TEST_PARAMS(output = SIGMOID_CLASS, op = kGT, num_classes = 2),
FIL_TEST_PARAMS(output = AVG, op = kGE),
FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
FIL_TEST_PARAMS(global_bias = 0.5),
FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, op = kLE),
FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, op = kGT),
FIL_TEST_PARAMS(
output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, op = kGE, num_classes = 2),
FIL_TEST_PARAMS(algo = ALGO_AUTO),
FIL_TEST_PARAMS(
output = AVG_CLASS, threshold = 1.0, op = kGE, leaf_algo = CATEGORICAL_LEAF, num_classes = 10),
FIL_TEST_PARAMS(output = AVG, algo = ALGO_AUTO, leaf_algo = CATEGORICAL_LEAF, num_classes = 4),
FIL_TEST_PARAMS(output = AVG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
FIL_TEST_PARAMS(output = AVG, leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
FIL_TEST_PARAMS(output = CLASS,
threshold = 1.0,
global_bias = 0.5,
op = kGE,
leaf_algo = GROVE_PER_CLASS,
num_classes = 10),
FIL_TEST_PARAMS(
num_trees = 52, output = CLASS, algo = ALGO_AUTO, leaf_algo = GROVE_PER_CLASS, num_classes = 4),
FIL_TEST_PARAMS(output = CLASS, op = kLE, leaf_algo = GROVE_PER_CLASS, num_classes = 5),
FIL_TEST_PARAMS(num_trees = 51,
output = CLASS,
global_bias = 0.5,
leaf_algo = GROVE_PER_CLASS,
num_classes = 3),
FIL_TEST_PARAMS(num_trees = 51,
output = SIGMOID_CLASS,
global_bias = 0.5,
leaf_algo = GROVE_PER_CLASS,
num_classes = 3),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_classes = 2),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_trees = 19, num_classes = 20),
};
TEST_P(TreeliteSparse16FilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse16FilTest, testing::ValuesIn(import_sparse_inputs));
TEST_P(TreeliteSparse8FilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse8FilTest, testing::ValuesIn(import_sparse_inputs));
std::vector<FilTestParams> import_auto_inputs = {
FIL_TEST_PARAMS(depth = 10, algo = ALGO_AUTO),
FIL_TEST_PARAMS(depth = 15, algo = ALGO_AUTO),
FIL_TEST_PARAMS(depth = 19, algo = ALGO_AUTO),
FIL_TEST_PARAMS(depth = 19, algo = BATCH_TREE_REORG),
FIL_TEST_PARAMS(
depth = 10, output = AVG, algo = ALGO_AUTO, leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
FIL_TEST_PARAMS(depth = 10,
num_trees = 51,
output = CLASS,
algo = ALGO_AUTO,
leaf_algo = GROVE_PER_CLASS,
num_classes = 3),
FIL_TEST_PARAMS(leaf_algo = VECTOR_LEAF, num_classes = 3, algo = ALGO_AUTO),
#if 0
FIL_TEST_PARAMS(depth = 19, output = AVG, algo = BATCH_TREE_REORG,
leaf_algo = CATEGORICAL_LEAF, num_classes = 6),
#endif
};
TEST_P(TreeliteAutoFilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteAutoFilTest, testing::ValuesIn(import_auto_inputs));
// adjust test parameters if the sparse8 format changes
std::vector<FilTestParams> import_throw_sparse8_inputs = {
// too many features
FIL_TEST_PARAMS(num_rows = 100, num_cols = 20000, depth = 10),
// too many tree nodes
FIL_TEST_PARAMS(depth = 16, num_trees = 5, leaf_prob = 0),
};
TEST_P(TreeliteThrowSparse8FilTest, Import) { check(); }
INSTANTIATE_TEST_CASE_P(FilTests,
TreeliteThrowSparse8FilTest,
testing::ValuesIn(import_throw_sparse8_inputs));
} // namespace ML
|
fde47972518e126cb2db2590383ecd1d5dcc6a36.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHAtomics.cuh"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include "THHNumerics.cuh"
#include "THHTensorTypeUtils.cuh"
#define OUTPUT_FEATURES_PER_THREAD 32
#define MAX_WARPS_PER_RUN 4
namespace detail {
/// Various utilities for dealing with arrays of values which are
/// maintained in thread-local registers. All accesses are done in such
/// a way that the index is statically known, which preserves the
/// compiler's ability to allocate the values to registers, as opposed
/// to local memory.
template <typename T, int N>
struct RegisterUtils {
/// Register shifting: move elements towards the beginning of the
/// array (towards 0) by `Shift` places:
/// arr[i] = arr[i + Shift]
/// The `Shift` elements at the end are left unchanged.
template <int Shift>
__device__ __forceinline__ static void shiftLeft(T arr[N]) {
// e.g., N = 5, Shift = 2:
// 0 1 2 3 4 becomes =>
// 2 3 4 3 4 (last are unchanged)
#pragma unroll
for (int i = 0; i < N - Shift; ++i) {
arr[i] = arr[i + Shift];
}
}
};
template <typename T>
__device__ __forceinline__
int getDim1Point(const THCDeviceTensor<T, 4>& input) {
int threadPoint = blockIdx.x * blockDim.x + threadIdx.x;
return threadPoint / input.getSize(3);
}
template <typename T>
__device__ __forceinline__
int getDim2Point(const THCDeviceTensor<T, 4>& input) {
int threadPoint = blockIdx.x * blockDim.x + threadIdx.x;
return threadPoint % input.getSize(3);
}
__device__ __forceinline__
int getStartOutputFeature() {
return blockIdx.y * OUTPUT_FEATURES_PER_THREAD;
}
template <typename T>
__device__ __forceinline__
int getEndOutputFeature(const THCDeviceTensor<T, 4>& output) {
return min((blockIdx.y + 1) * OUTPUT_FEATURES_PER_THREAD, output.getSize(1));
}
__device__ __forceinline__
int getBatch() {
return blockIdx.z;
}
// All of these functions that follow are MathOps; they are template
// parameters so L2 can be more efficiently implemented
// template <typename T>
// typedef T (*MathOp)(const T in, const T arg);
template <typename T>
__device__ __forceinline__ T power2(const T in, const T power) {
return THCNumerics<T>::mul(in, in);
}
template <typename T>
__device__ __forceinline__ T root2(const T in, const T power) {
return THCNumerics<T>::sqrt(in);
}
template <typename T>
__device__ __forceinline__ T powerGrad2(const T in, const T power) {
return in;
}
template <typename T>
__device__ __forceinline__ T powerN(const T in, const T power) {
return THCNumerics<T>::pow(in, power);
}
template <typename T>
__device__ __forceinline__ T rootN(const T in, const T power) {
const T invPower = THCNumerics<T>::cinv(power);
return THCNumerics<T>::pow(in, invPower);
}
template <typename T>
__device__ __forceinline__ T powerGradN(const T in, const T power) {
return THCNumerics<T>::pow(in,
THCNumerics<T>::sub(power,
ScalarConvert<int, T>::to(1)));
}
// Input is of the form:
// [batch][feature dim][optional dim 1][optional dim 2]
template <typename T,
int Width,
int Stride,
T (*PowerFunc)(T in, T power),
T (*RootFunc)(T in, T power)>
__global__ void
featureLPPoolingUpdateOutput(const THCDeviceTensor<T, 4> input,
THCDeviceTensor<T, 4> output,
T power) {
// What non-feature points is this thread handling?
int dim1Point = getDim1Point(input);
int dim2Point = getDim2Point(input);
if (dim1Point >= input.getSize(2) || dim2Point >= input.getSize(3)) {
// This thread in the warp is out of bounds
return;
}
// What feature points is this thread handling?
int startOutputFeature = getStartOutputFeature();
int endOutputFeature = getEndOutputFeature(output);
int startInputFeature = startOutputFeature * Stride;
// What batch points is this thread handling?
int batch = getBatch();
// If stride >= width, then there is no loaded data reuse.
// If stride > 1 and stride < width, then shift by stride, since we
// can reuse Width - Stride elements from the previous round.
// e.g., width = 5, stride = 2,
// output 0 uses input 0 1 2 3 4
// output 1 uses input 2 3 4 5 6 (inputs 2 - 4 are reused, i.e., 5 -
// 2 elements are reused, and we have to shift the array by 2)
//
// e.g., width = 5, stride = 3,
// output 0 uses input 0 1 2 3 4
// output 1 uses input 3 4 5 6 7 (inputs 3 - 4 are reused, i.e., 5 - 3
// elements are reused, and we have to shift the array by 3)
// Valid only pooling: load Width elements from input (Width -
// Stride is handled here, at the top of the loop we handle the
// remaining Stride elements). We already verified that the input is
// larger than the width.
// `in` will contain the input values ^ power.
T in[Width];
#pragma unroll
for (int i = 0; i < Width - Stride; ++i) {
const T data =
input[batch][startInputFeature + i][dim1Point][dim2Point];
in[i] = PowerFunc(data, power);
}
for (int outputFeature = startOutputFeature;
outputFeature < endOutputFeature;
++outputFeature) {
// If Stride < Width, we're loading Stride new values starting at
// Width - Stride
// If Stride >= Width, we're loading Width new values starting at 0
if (Stride < Width) {
int nextInputFeature = outputFeature * Stride + Width - Stride;
#pragma unroll
for (int i = 0; i < Stride; ++i) {
const T data =
input[batch][nextInputFeature + i][dim1Point][dim2Point];
in[Width - Stride + i] = PowerFunc(data, power);
}
} else {
int nextInputFeature = outputFeature * Stride;
#pragma unroll
for (int i = 0; i < Width; ++i) {
T data = input[batch][nextInputFeature + i][dim1Point][dim2Point];
in[i] = PowerFunc(data, power);
}
}
// Calculate the new output feature
T val = ScalarConvert<int, T>::to(0);
for (int i = 0; i < Width; ++i) {
val = THCNumerics<T>::add(val, in[i]);
}
val = RootFunc(val, power);
output[batch][outputFeature][dim1Point][dim2Point] = val;
if (Stride < Width) {
// Shift registers for calculating the next point
RegisterUtils<T, Width>::template shiftLeft<Stride>(in);
}
}
}
// forward pass: f(a, ..., z) = (a^p + ... + z^p)^(1 / p)
// for bprop:
// partial df(a, ... z)/da = a^(p - 1) * (a^p + ... + z^p)^((1 / p) - 1) =
// a^(p - 1) * 1/(f(a, ..., z)^(p - 1)) = (a / f(a, ..., z))^(p - 1)
//
// example: for p = 2, df(a, ..., z)/da = a / f(a, ..., z)
// example: for p = 3, df(a, ..., z)/da = (a / f(a, ..., z))^2
//
// PowerGradFunc implements x^(p - 1)
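// Tying the formulas above to the kernel below: the per-element update
// computed there is
//   val = gradOut * PowerGradFunc(in[i] / out, power)
// which for p = 2 reduces to gradOut * (in[i] / out), since powerGrad2
// returns its argument unchanged (x^(2 - 1) = x).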
template <typename T,
int Width,
int Stride,
T (*PowerGradFunc)(T in, T arg)>
__global__ void
featureLPPoolingUpdateGradInput(const THCDeviceTensor<T, 4> gradOutput,
const THCDeviceTensor<T, 4> input,
const THCDeviceTensor<T, 4> output,
THCDeviceTensor<T, 4> gradInput,
T power) {
// What non-feature points is this thread handling?
int dim1Point = getDim1Point(input);
int dim2Point = getDim2Point(input);
if (dim1Point >= input.getSize(2) || dim2Point >= input.getSize(3)) {
// This thread in the warp is out of bounds
return;
}
// What feature points is this thread handling? [start, end)
int startOutputFeature = getStartOutputFeature();
int endOutputFeature = getEndOutputFeature(output);
// What is the first input point that the output features depend
// upon? [start, end)
int startInputFeature = startOutputFeature * Stride;
int endInputFeature = endOutputFeature * Stride;
// What batch points is this thread handling?
int batch = getBatch();
// atomicAdd into gradInput is slow, avoid it where possible.
// We can do this because there is a range of gradInput elements
// that we are updating exclusively. This is how we find it
//
// width = 3 stride = 1 example:
// ------------------------------
// startOutputFeature for this thread
// |
// |
// previous thread's output feature
// | |
// | | gradOutput
// __v____v___________________
// | | | | | |
// ---------------------------
// |\ \_____
// | \__ \ gradInput
// __v____v____v_____________
// | | | | | |
// ---------------------------
// A A
// | |
// startInputFeature
// |
// exclusiveStartInputFeature
//
// exclusiveStartInputFeature is the first input feature that we can
// write into exclusively; the one right before it overlaps with
// updates from a previous thread and thus has to use atomicAdd.
int exclusiveStartInputFeature =
startInputFeature == 0 ?
// no thread is before ourselves
0 :
// there is a thread before ourselves
startInputFeature + (Width - 1) * Stride;
  // Similarly, exclusiveEndInputFeature is the end (exclusive) of the
  // range of input features that we can write into exclusively, since
  // we might be overlapping with the following thread
int exclusiveEndInputFeature =
endOutputFeature == output.getSize(1) ?
// no thread is after ourselves
endInputFeature + (Width - 1) * Stride :
// there is a thread after ourselves
endInputFeature;
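  // Worked instance (illustrative; assumes Width = 3, Stride = 1, the
  // OUTPUT_FEATURES_PER_THREAD = 32 defined above, and output features
  // both before and after this range): the block row handling output
  // features [32, 64) reads input features [32, 66); inputs 32-33 are
  // shared with the previous block row and 64-65 with the next, so only
  // [exclusiveStartInputFeature, exclusiveEndInputFeature) = [34, 64)
  // can be updated without atomicAdd.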
// As with updateOutput preload input elements, except no need to
// transform them
T in[Width];
#pragma unroll
for (int i = 0; i < Width - Stride; ++i) {
in[i] = input[batch][startInputFeature + i][dim1Point][dim2Point];
}
for (int outputFeature = startOutputFeature;
outputFeature < endOutputFeature;
++outputFeature) {
// As with updateOutput load the subsequent input elements that we
// need, except no need to transform them
//
// If Stride < Width, we're loading Stride new values starting at
// Width - Stride
// If Stride >= Width, we're loading Width new values starting at 0
if (Stride < Width) {
int nextInputFeature = outputFeature * Stride + Width - Stride;
#pragma unroll
for (int i = 0; i < Stride; ++i) {
in[Width - Stride + i] =
input[batch][nextInputFeature + i][dim1Point][dim2Point];
}
} else {
int nextInputFeature = outputFeature * Stride;
#pragma unroll
for (int i = 0; i < Width; ++i) {
in[i] = input[batch][nextInputFeature + i][dim1Point][dim2Point];
}
}
// A given output feature gradient contributes to `Width` input
// gradients
const T gradOut =
gradOutput[batch][outputFeature][dim1Point][dim2Point];
// Load output (f(x_is)). It is possible that this is zero, in
// which case we'll ignore this point.
T out = output[batch][outputFeature][dim1Point][dim2Point];
if (THCNumerics<T>::eq(out, ScalarConvert<int, T>::to(0))) {
continue;
}
int curStartInputFeature = outputFeature * Stride;
int curEndInputFeature = outputFeature * Stride + Width - 1;
if (curStartInputFeature >= exclusiveStartInputFeature &&
curEndInputFeature < exclusiveEndInputFeature) {
// This thread is exclusively responsible for updating these
// input points, so we need not make the addition atomic
for (int i = 0; i < Width; ++i) {
int inputFeature = outputFeature * Stride + i;
// Calculate grad * (x_i / f(x_is))^(p - 1)
const T val = THCNumerics<T>::mul(
gradOut,
PowerGradFunc(THCNumerics<T>::div(in[i], out), power));
gradInput[batch][inputFeature][dim1Point][dim2Point] =
THCNumerics<T>::add(
gradInput[batch][inputFeature][dim1Point][dim2Point], val);
}
} else {
// Handle start and end boundary cases: potential overlap with
// other threads
for (int i = 0; i < Width; ++i) {
int inputFeature = outputFeature * Stride + i;
// Calculate grad * (x_i / f(x_is))^(p - 1)
T val = THCNumerics<T>::mul(
gradOut,
PowerGradFunc(THCNumerics<T>::div(in[i], out), power));
// We don't overlap other threads for this range
if (inputFeature >= exclusiveStartInputFeature &&
inputFeature < exclusiveEndInputFeature) {
gradInput[batch][inputFeature][dim1Point][dim2Point]
= THCNumerics<T>::add(
gradInput[batch][inputFeature][dim1Point][dim2Point], val);
} else {
// We are potentially overlapping with threads handling
// features before ourselves, so these need to be added atomically
atomicAdd(&gradInput[batch][inputFeature][dim1Point][dim2Point],
val);
}
}
}
if (Stride < Width) {
// Shift registers for calculating the next point
RegisterUtils<T, Width>::template shiftLeft<Stride>(in);
}
}
}
} // namespace detail
inline int lpPoolingOutputSize(int inputSize, int width, int stride) {
return ((inputSize - width) / stride) + 1;
}
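// Example (illustrative): inputSize = 10, width = 3, stride = 2 gives
// ((10 - 3) / 2) + 1 = 4 output features, covering input feature windows
// [0, 2], [2, 4], [4, 6] and [6, 8]; input feature 9 is simply dropped,
// as expected for valid-only pooling.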
template <typename T>
bool
runFeatureLPPoolingUpdateOutput(THCState* state,
const THCDeviceTensor<T, 4>& input,
THCDeviceTensor<T, 4>& output,
float power, int width, int stride) {
hipStream_t stream =
THCState_getCurrentStream(state);
const hipDeviceProp_t* deviceProperties =
THCState_getCurrentDeviceProperties(state);
int outputFeatures = ((input.getSize(1) - width) / stride) + 1;
THAssert(input.getSize(0) == output.getSize(0));
THAssert(outputFeatures == output.getSize(1));
THAssert(input.getSize(1) >= width);
THAssert(input.getSize(2) == output.getSize(2));
THAssert(input.getSize(3) == output.getSize(3));
THAssert(power > 0.0f);
THAssert(width >= 1);
THAssert(stride >= 1);
// Split non-features among threads and grid x
int totalNonFeatureSize = input.getSize(2) * input.getSize(3);
int numWarps =
min(THCCeilDiv(totalNonFeatureSize, deviceProperties->warpSize),
MAX_WARPS_PER_RUN);
int blockSize = deviceProperties->warpSize * numWarps;
// Split non-features among grid x
int nonFeatureSizeBlocks = THCCeilDiv(totalNonFeatureSize, blockSize);
// Split features among grid y, up to a maximum number of features per thread
int featureBlocks = THCCeilDiv(outputFeatures, OUTPUT_FEATURES_PER_THREAD);
// Split batch among grid z.
dim3 grid(nonFeatureSizeBlocks, featureBlocks, input.getSize(0));
dim3 block(blockSize);
#define L2_STRIDE_CASE(STRIDE, WIDTH) \
case STRIDE: \
hipLaunchKernelGGL(( detail::featureLPPoolingUpdateOutput<T, WIDTH, \
STRIDE, \
detail::power2, \
detail::root2>), dim3(grid), dim3(block), 0, stream, \
input, output, \
ScalarConvert<float, T>::to(power)); \
return true;
#define L2_WIDTH_CASE(WIDTH) \
case WIDTH: \
switch (stride) { \
L2_STRIDE_CASE(1, WIDTH); \
L2_STRIDE_CASE(2, WIDTH); \
L2_STRIDE_CASE(3, WIDTH); \
L2_STRIDE_CASE(4, WIDTH); \
}
#define LP_STRIDE_CASE(STRIDE, WIDTH) \
case STRIDE: \
hipLaunchKernelGGL(( detail::featureLPPoolingUpdateOutput<T, WIDTH, \
STRIDE, \
detail::powerN, \
detail::rootN>), dim3(grid), dim3(block), 0, stream, \
input, output, \
ScalarConvert<float, T>::to(power)); \
return true;
#define LP_WIDTH_CASE(WIDTH) \
case WIDTH: \
switch (stride) { \
LP_STRIDE_CASE(1, WIDTH); \
LP_STRIDE_CASE(2, WIDTH); \
LP_STRIDE_CASE(3, WIDTH); \
LP_STRIDE_CASE(4, WIDTH); \
}
if (power == 2.0f) {
switch (width) {
L2_WIDTH_CASE(2);
L2_WIDTH_CASE(3);
L2_WIDTH_CASE(4);
L2_WIDTH_CASE(5);
L2_WIDTH_CASE(6);
L2_WIDTH_CASE(7);
L2_WIDTH_CASE(8);
L2_WIDTH_CASE(9);
L2_WIDTH_CASE(10);
L2_WIDTH_CASE(11);
L2_WIDTH_CASE(12);
L2_WIDTH_CASE(13);
L2_WIDTH_CASE(14);
L2_WIDTH_CASE(15);
L2_WIDTH_CASE(16);
}
} else {
switch (width) {
LP_WIDTH_CASE(2);
LP_WIDTH_CASE(3);
LP_WIDTH_CASE(4);
LP_WIDTH_CASE(5);
LP_WIDTH_CASE(6);
LP_WIDTH_CASE(7);
LP_WIDTH_CASE(8);
LP_WIDTH_CASE(9);
LP_WIDTH_CASE(10);
LP_WIDTH_CASE(11);
LP_WIDTH_CASE(12);
LP_WIDTH_CASE(13);
LP_WIDTH_CASE(14);
LP_WIDTH_CASE(15);
LP_WIDTH_CASE(16);
}
}
// Otherwise, we have an unhandled width and/or stride.
return false;
#undef L2_STRIDE_CASE
#undef L2_WIDTH_CASE
#undef LP_STRIDE_CASE
#undef LP_WIDTH_CASE
}
template <typename T>
bool
runFeatureLPPoolingUpdateGradInput(THCState* state,
const THCDeviceTensor<T, 4>& gradOutput,
const THCDeviceTensor<T, 4>& input,
const THCDeviceTensor<T, 4>& output,
THCDeviceTensor<T, 4>& gradInput,
float power, int width, int stride) {
hipStream_t stream =
THCState_getCurrentStream(state);
const hipDeviceProp_t* deviceProperties =
THCState_getCurrentDeviceProperties(state);
for (int i = 0; i < 4; ++i) {
THAssert(gradOutput.getSize(i) == output.getSize(i));
THAssert(gradInput.getSize(i) == input.getSize(i));
}
int outputFeatures = ((input.getSize(1) - width) / stride) + 1;
THAssert(gradInput.getSize(0) == gradOutput.getSize(0));
THAssert(outputFeatures == gradOutput.getSize(1));
THAssert(gradInput.getSize(1) >= width);
THAssert(gradInput.getSize(2) == gradOutput.getSize(2));
THAssert(gradInput.getSize(3) == gradOutput.getSize(3));
THAssert(power > 0.0f);
THAssert(width >= 1);
THAssert(stride >= 1);
// Different threads are potentially adding into overlapping input
// points, so we must clear out gradInput before continuing.
gradInput.zero(stream);
// Split non-features among threads and grid x
int totalNonFeatureSize = input.getSize(2) * input.getSize(3);
int numWarps =
min(THCCeilDiv(totalNonFeatureSize, deviceProperties->warpSize),
MAX_WARPS_PER_RUN);
int blockSize = deviceProperties->warpSize * numWarps;
// Split non-features among grid x
int nonFeatureSizeBlocks = THCCeilDiv(totalNonFeatureSize, blockSize);
// Split features among grid y, up to a maximum number of features per thread
int featureBlocks = THCCeilDiv(outputFeatures, OUTPUT_FEATURES_PER_THREAD);
// Split batch among grid z.
dim3 grid(nonFeatureSizeBlocks, featureBlocks, input.getSize(0));
dim3 block(blockSize);
#define L2_STRIDE_CASE(STRIDE, WIDTH) \
case STRIDE: \
hipLaunchKernelGGL(( detail::featureLPPoolingUpdateGradInput< \
T, WIDTH, STRIDE, detail::powerGrad2>), dim3(grid), dim3(block), 0, stream, \
gradOutput, input, output, gradInput, \
ScalarConvert<float, T>::to(power)); \
return true;
#define L2_WIDTH_CASE(WIDTH) \
case WIDTH: \
switch (stride) { \
L2_STRIDE_CASE(1, WIDTH); \
L2_STRIDE_CASE(2, WIDTH); \
L2_STRIDE_CASE(3, WIDTH); \
L2_STRIDE_CASE(4, WIDTH); \
}
#define LP_STRIDE_CASE(STRIDE, WIDTH) \
case STRIDE: \
hipLaunchKernelGGL(( detail::featureLPPoolingUpdateGradInput< \
T, WIDTH, STRIDE, detail::powerGradN>), dim3(grid), dim3(block), 0, stream, \
gradOutput, input, output, gradInput, \
ScalarConvert<float, T>::to(power)); \
return true;
#define LP_WIDTH_CASE(WIDTH) \
case WIDTH: \
switch (stride) { \
LP_STRIDE_CASE(1, WIDTH); \
LP_STRIDE_CASE(2, WIDTH); \
LP_STRIDE_CASE(3, WIDTH); \
LP_STRIDE_CASE(4, WIDTH); \
}
if (power == 2.0f) {
switch (width) {
L2_WIDTH_CASE(2);
L2_WIDTH_CASE(3);
L2_WIDTH_CASE(4);
L2_WIDTH_CASE(5);
L2_WIDTH_CASE(6);
L2_WIDTH_CASE(7);
L2_WIDTH_CASE(8);
L2_WIDTH_CASE(9);
L2_WIDTH_CASE(10);
L2_WIDTH_CASE(11);
L2_WIDTH_CASE(12);
L2_WIDTH_CASE(13);
L2_WIDTH_CASE(14);
L2_WIDTH_CASE(15);
L2_WIDTH_CASE(16);
}
} else {
switch (width) {
LP_WIDTH_CASE(2);
LP_WIDTH_CASE(3);
LP_WIDTH_CASE(4);
LP_WIDTH_CASE(5);
LP_WIDTH_CASE(6);
LP_WIDTH_CASE(7);
LP_WIDTH_CASE(8);
LP_WIDTH_CASE(9);
LP_WIDTH_CASE(10);
LP_WIDTH_CASE(11);
LP_WIDTH_CASE(12);
LP_WIDTH_CASE(13);
LP_WIDTH_CASE(14);
LP_WIDTH_CASE(15);
LP_WIDTH_CASE(16);
}
}
// Otherwise, we have an unhandled width and/or stride.
return false;
#undef L2_STRIDE_CASE
#undef L2_WIDTH_CASE
#undef LP_STRIDE_CASE
#undef LP_WIDTH_CASE
}
#include "generic/FeatureLPPooling.cu"
#include "THHGenerateFloatTypes.h"
|
fde47972518e126cb2db2590383ecd1d5dcc6a36.cu
|
#include "THCUNN.h"
#include "THCAtomics.cuh"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include "THCNumerics.cuh"
#include "THCTensorTypeUtils.cuh"
#define OUTPUT_FEATURES_PER_THREAD 32
#define MAX_WARPS_PER_RUN 4
namespace detail {
/// Various utilities for dealing with arrays of values which are
/// maintained in thread-local registers. All accesses are done in such
/// a way that the index is statically known, which preserves the
/// compiler's ability to allocate the values to registers, as opposed
/// to local memory.
template <typename T, int N>
struct RegisterUtils {
/// Register shifting: move elements towards the beginning of the
/// array (towards 0) by `Shift` places:
/// arr[i] = arr[i + Shift]
/// The `Shift` elements at the end are left unchanged.
template <int Shift>
__device__ __forceinline__ static void shiftLeft(T arr[N]) {
// e.g., N = 5, Shift = 2:
// 0 1 2 3 4 becomes =>
// 2 3 4 3 4 (last are unchanged)
#pragma unroll
for (int i = 0; i < N - Shift; ++i) {
arr[i] = arr[i + Shift];
}
}
};
template <typename T>
__device__ __forceinline__
int getDim1Point(const THCDeviceTensor<T, 4>& input) {
int threadPoint = blockIdx.x * blockDim.x + threadIdx.x;
return threadPoint / input.getSize(3);
}
template <typename T>
__device__ __forceinline__
int getDim2Point(const THCDeviceTensor<T, 4>& input) {
int threadPoint = blockIdx.x * blockDim.x + threadIdx.x;
return threadPoint % input.getSize(3);
}
__device__ __forceinline__
int getStartOutputFeature() {
return blockIdx.y * OUTPUT_FEATURES_PER_THREAD;
}
template <typename T>
__device__ __forceinline__
int getEndOutputFeature(const THCDeviceTensor<T, 4>& output) {
return min((blockIdx.y + 1) * OUTPUT_FEATURES_PER_THREAD, output.getSize(1));
}
__device__ __forceinline__
int getBatch() {
return blockIdx.z;
}
// All of these functions that follow are MathOps; they are template
// parameters so L2 can be more efficiently implemented
// template <typename T>
// typedef T (*MathOp)(const T in, const T arg);
template <typename T>
__device__ __forceinline__ T power2(const T in, const T power) {
return THCNumerics<T>::mul(in, in);
}
template <typename T>
__device__ __forceinline__ T root2(const T in, const T power) {
return THCNumerics<T>::sqrt(in);
}
template <typename T>
__device__ __forceinline__ T powerGrad2(const T in, const T power) {
return in;
}
template <typename T>
__device__ __forceinline__ T powerN(const T in, const T power) {
return THCNumerics<T>::pow(in, power);
}
template <typename T>
__device__ __forceinline__ T rootN(const T in, const T power) {
const T invPower = THCNumerics<T>::cinv(power);
return THCNumerics<T>::pow(in, invPower);
}
template <typename T>
__device__ __forceinline__ T powerGradN(const T in, const T power) {
return THCNumerics<T>::pow(in,
THCNumerics<T>::sub(power,
ScalarConvert<int, T>::to(1)));
}
// Input is of the form:
// [batch][feature dim][optional dim 1][optional dim 2]
template <typename T,
int Width,
int Stride,
T (*PowerFunc)(T in, T power),
T (*RootFunc)(T in, T power)>
__global__ void
featureLPPoolingUpdateOutput(const THCDeviceTensor<T, 4> input,
THCDeviceTensor<T, 4> output,
T power) {
// What non-feature points is this thread handling?
int dim1Point = getDim1Point(input);
int dim2Point = getDim2Point(input);
if (dim1Point >= input.getSize(2) || dim2Point >= input.getSize(3)) {
// This thread in the warp is out of bounds
return;
}
// What feature points is this thread handling?
int startOutputFeature = getStartOutputFeature();
int endOutputFeature = getEndOutputFeature(output);
int startInputFeature = startOutputFeature * Stride;
// What batch points is this thread handling?
int batch = getBatch();
// If stride >= width, then there is no loaded data reuse.
// If stride > 1 and stride < width, then shift by stride, since we
// can reuse Width - Stride elements from the previous round.
// e.g., width = 5, stride = 2,
// output 0 uses input 0 1 2 3 4
// output 1 uses input 2 3 4 5 6 (inputs 2 - 4 are reused, i.e., 5 -
// 2 elements are reused, and we have to shift the array by 2)
//
// e.g., width = 5, stride = 3,
// output 0 uses input 0 1 2 3 4
// output 1 uses input 3 4 5 6 7 (inputs 3 - 4 are reused, i.e., 5 - 3
// elements are reused, and we have to shift the array by 3)
  // Valid-only pooling: load Width elements from input (Width -
  // Stride is handled here; at the top of the loop we handle the
// remaining Stride elements). We already verified that the input is
// larger than the width.
// `in` will contain the input values ^ power.
T in[Width];
#pragma unroll
for (int i = 0; i < Width - Stride; ++i) {
const T data =
input[batch][startInputFeature + i][dim1Point][dim2Point];
in[i] = PowerFunc(data, power);
}
for (int outputFeature = startOutputFeature;
outputFeature < endOutputFeature;
++outputFeature) {
// If Stride < Width, we're loading Stride new values starting at
// Width - Stride
// If Stride >= Width, we're loading Width new values starting at 0
if (Stride < Width) {
int nextInputFeature = outputFeature * Stride + Width - Stride;
#pragma unroll
for (int i = 0; i < Stride; ++i) {
const T data =
input[batch][nextInputFeature + i][dim1Point][dim2Point];
in[Width - Stride + i] = PowerFunc(data, power);
}
} else {
int nextInputFeature = outputFeature * Stride;
#pragma unroll
for (int i = 0; i < Width; ++i) {
T data = input[batch][nextInputFeature + i][dim1Point][dim2Point];
in[i] = PowerFunc(data, power);
}
}
// Calculate the new output feature
T val = ScalarConvert<int, T>::to(0);
for (int i = 0; i < Width; ++i) {
val = THCNumerics<T>::add(val, in[i]);
}
val = RootFunc(val, power);
output[batch][outputFeature][dim1Point][dim2Point] = val;
if (Stride < Width) {
// Shift registers for calculating the next point
RegisterUtils<T, Width>::template shiftLeft<Stride>(in);
}
}
}
// forward pass: f(a, ..., z) = (a^p + ... + z^p)^(1 / p)
// for bprop:
// partial df(a, ... z)/da = a^(p - 1) * (a^p + ... + z^p)^((1 / p) - 1) =
// a^(p - 1) * 1/(f(a, ..., z)^(p - 1)) = (a / f(a, ..., z))^(p - 1)
//
// example: for p = 2, df(a, ..., z)/da = a / f(a, ..., z)
// example: for p = 3, df(a, ..., z)/da = (a / f(a, ..., z))^2
//
// PowerGradFunc implements x^(p - 1)
template <typename T,
int Width,
int Stride,
T (*PowerGradFunc)(T in, T arg)>
__global__ void
featureLPPoolingUpdateGradInput(const THCDeviceTensor<T, 4> gradOutput,
const THCDeviceTensor<T, 4> input,
const THCDeviceTensor<T, 4> output,
THCDeviceTensor<T, 4> gradInput,
T power) {
// What non-feature points is this thread handling?
int dim1Point = getDim1Point(input);
int dim2Point = getDim2Point(input);
if (dim1Point >= input.getSize(2) || dim2Point >= input.getSize(3)) {
// This thread in the warp is out of bounds
return;
}
// What feature points is this thread handling? [start, end)
int startOutputFeature = getStartOutputFeature();
int endOutputFeature = getEndOutputFeature(output);
// What is the first input point that the output features depend
// upon? [start, end)
int startInputFeature = startOutputFeature * Stride;
int endInputFeature = endOutputFeature * Stride;
// What batch points is this thread handling?
int batch = getBatch();
// atomicAdd into gradInput is slow, avoid it where possible.
// We can do this because there is a range of gradInput elements
// that we are updating exclusively. This is how we find it
//
// width = 3 stride = 1 example:
// ------------------------------
// startOutputFeature for this thread
// |
// |
// previous thread's output feature
// | |
// | | gradOutput
// __v____v___________________
// | | | | | |
// ---------------------------
// |\ \_____
// | \__ \ gradInput
// __v____v____v_____________
// | | | | | |
// ---------------------------
// A A
// | |
// startInputFeature
// |
// exclusiveStartInputFeature
//
// exclusiveStartInputFeature is the first input feature that we can
// write into exclusively; the one right before it overlaps with
// updates from a previous thread and thus has to use atomicAdd.
int exclusiveStartInputFeature =
startInputFeature == 0 ?
// no thread is before ourselves
0 :
// there is a thread before ourselves
startInputFeature + (Width - 1) * Stride;
// Similarly, exclusiveEndInputFeature is the last input feature
// that we can write into exclusively, since we might be overlapping
// with the following thread
int exclusiveEndInputFeature =
endOutputFeature == output.getSize(1) ?
// no thread is after ourselves
endInputFeature + (Width - 1) * Stride :
// there is a thread after ourselves
endInputFeature;
// As with updateOutput preload input elements, except no need to
// transform them
T in[Width];
#pragma unroll
for (int i = 0; i < Width - Stride; ++i) {
in[i] = input[batch][startInputFeature + i][dim1Point][dim2Point];
}
for (int outputFeature = startOutputFeature;
outputFeature < endOutputFeature;
++outputFeature) {
// As with updateOutput load the subsequent input elements that we
// need, except no need to transform them
//
// If Stride < Width, we're loading Stride new values starting at
// Width - Stride
// If Stride >= Width, we're loading Width new values starting at 0
if (Stride < Width) {
int nextInputFeature = outputFeature * Stride + Width - Stride;
#pragma unroll
for (int i = 0; i < Stride; ++i) {
in[Width - Stride + i] =
input[batch][nextInputFeature + i][dim1Point][dim2Point];
}
} else {
int nextInputFeature = outputFeature * Stride;
#pragma unroll
for (int i = 0; i < Width; ++i) {
in[i] = input[batch][nextInputFeature + i][dim1Point][dim2Point];
}
}
// A given output feature gradient contributes to `Width` input
// gradients
const T gradOut =
gradOutput[batch][outputFeature][dim1Point][dim2Point];
// Load output (f(x_is)). It is possible that this is zero, in
// which case we'll ignore this point.
T out = output[batch][outputFeature][dim1Point][dim2Point];
if (THCNumerics<T>::eq(out, ScalarConvert<int, T>::to(0))) {
continue;
}
int curStartInputFeature = outputFeature * Stride;
int curEndInputFeature = outputFeature * Stride + Width - 1;
if (curStartInputFeature >= exclusiveStartInputFeature &&
curEndInputFeature < exclusiveEndInputFeature) {
// This thread is exclusively responsible for updating these
// input points, so we need not make the addition atomic
for (int i = 0; i < Width; ++i) {
int inputFeature = outputFeature * Stride + i;
// Calculate grad * (x_i / f(x_is))^(p - 1)
const T val = THCNumerics<T>::mul(
gradOut,
PowerGradFunc(THCNumerics<T>::div(in[i], out), power));
gradInput[batch][inputFeature][dim1Point][dim2Point] =
THCNumerics<T>::add(
gradInput[batch][inputFeature][dim1Point][dim2Point], val);
}
} else {
// Handle start and end boundary cases: potential overlap with
// other threads
for (int i = 0; i < Width; ++i) {
int inputFeature = outputFeature * Stride + i;
// Calculate grad * (x_i / f(x_is))^(p - 1)
T val = THCNumerics<T>::mul(
gradOut,
PowerGradFunc(THCNumerics<T>::div(in[i], out), power));
// We don't overlap other threads for this range
if (inputFeature >= exclusiveStartInputFeature &&
inputFeature < exclusiveEndInputFeature) {
gradInput[batch][inputFeature][dim1Point][dim2Point]
= THCNumerics<T>::add(
gradInput[batch][inputFeature][dim1Point][dim2Point], val);
} else {
// We are potentially overlapping with threads handling
// features before ourselves, so these need to be added atomically
atomicAdd(&gradInput[batch][inputFeature][dim1Point][dim2Point],
val);
}
}
}
if (Stride < Width) {
// Shift registers for calculating the next point
RegisterUtils<T, Width>::template shiftLeft<Stride>(in);
}
}
}
} // namespace detail
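// Number of output features produced by "valid" pooling over the feature
// dimension, e.g., inputSize = 10, width = 3, stride = 2 gives
// (10 - 3) / 2 + 1 = 4.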
inline int lpPoolingOutputSize(int inputSize, int width, int stride) {
return ((inputSize - width) / stride) + 1;
}
template <typename T>
bool
runFeatureLPPoolingUpdateOutput(THCState* state,
const THCDeviceTensor<T, 4>& input,
THCDeviceTensor<T, 4>& output,
float power, int width, int stride) {
cudaStream_t stream =
THCState_getCurrentStream(state);
const cudaDeviceProp* deviceProperties =
THCState_getCurrentDeviceProperties(state);
int outputFeatures = ((input.getSize(1) - width) / stride) + 1;
THAssert(input.getSize(0) == output.getSize(0));
THAssert(outputFeatures == output.getSize(1));
THAssert(input.getSize(1) >= width);
THAssert(input.getSize(2) == output.getSize(2));
THAssert(input.getSize(3) == output.getSize(3));
THAssert(power > 0.0f);
THAssert(width >= 1);
THAssert(stride >= 1);
// Split non-features among threads and grid x
int totalNonFeatureSize = input.getSize(2) * input.getSize(3);
int numWarps =
min(THCCeilDiv(totalNonFeatureSize, deviceProperties->warpSize),
MAX_WARPS_PER_RUN);
int blockSize = deviceProperties->warpSize * numWarps;
// Split non-features among grid x
int nonFeatureSizeBlocks = THCCeilDiv(totalNonFeatureSize, blockSize);
// Split features among grid y, up to a maximum number of features per thread
int featureBlocks = THCCeilDiv(outputFeatures, OUTPUT_FEATURES_PER_THREAD);
// Split batch among grid z.
dim3 grid(nonFeatureSizeBlocks, featureBlocks, input.getSize(0));
dim3 block(blockSize);
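  // Width and stride are template parameters so the per-output loops can be
  // fully unrolled; only widths 2-16 and strides 1-4 are instantiated, and any
  // other combination falls through to the `return false` below.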
#define L2_STRIDE_CASE(STRIDE, WIDTH) \
case STRIDE: \
detail:: \
featureLPPoolingUpdateOutput<T, WIDTH, \
STRIDE, \
detail::power2, \
detail::root2><<<grid, block, 0, stream>>>( \
input, output, \
ScalarConvert<float, T>::to(power)); \
return true;
#define L2_WIDTH_CASE(WIDTH) \
case WIDTH: \
switch (stride) { \
L2_STRIDE_CASE(1, WIDTH); \
L2_STRIDE_CASE(2, WIDTH); \
L2_STRIDE_CASE(3, WIDTH); \
L2_STRIDE_CASE(4, WIDTH); \
}
#define LP_STRIDE_CASE(STRIDE, WIDTH) \
case STRIDE: \
detail:: \
featureLPPoolingUpdateOutput<T, WIDTH, \
STRIDE, \
detail::powerN, \
detail::rootN><<<grid, block, 0, stream>>>( \
input, output, \
ScalarConvert<float, T>::to(power)); \
return true;
#define LP_WIDTH_CASE(WIDTH) \
case WIDTH: \
switch (stride) { \
LP_STRIDE_CASE(1, WIDTH); \
LP_STRIDE_CASE(2, WIDTH); \
LP_STRIDE_CASE(3, WIDTH); \
LP_STRIDE_CASE(4, WIDTH); \
}
if (power == 2.0f) {
switch (width) {
L2_WIDTH_CASE(2);
L2_WIDTH_CASE(3);
L2_WIDTH_CASE(4);
L2_WIDTH_CASE(5);
L2_WIDTH_CASE(6);
L2_WIDTH_CASE(7);
L2_WIDTH_CASE(8);
L2_WIDTH_CASE(9);
L2_WIDTH_CASE(10);
L2_WIDTH_CASE(11);
L2_WIDTH_CASE(12);
L2_WIDTH_CASE(13);
L2_WIDTH_CASE(14);
L2_WIDTH_CASE(15);
L2_WIDTH_CASE(16);
}
} else {
switch (width) {
LP_WIDTH_CASE(2);
LP_WIDTH_CASE(3);
LP_WIDTH_CASE(4);
LP_WIDTH_CASE(5);
LP_WIDTH_CASE(6);
LP_WIDTH_CASE(7);
LP_WIDTH_CASE(8);
LP_WIDTH_CASE(9);
LP_WIDTH_CASE(10);
LP_WIDTH_CASE(11);
LP_WIDTH_CASE(12);
LP_WIDTH_CASE(13);
LP_WIDTH_CASE(14);
LP_WIDTH_CASE(15);
LP_WIDTH_CASE(16);
}
}
// Otherwise, we have an unhandled width and/or stride.
return false;
#undef L2_STRIDE_CASE
#undef L2_WIDTH_CASE
#undef LP_STRIDE_CASE
#undef LP_WIDTH_CASE
}
template <typename T>
bool
runFeatureLPPoolingUpdateGradInput(THCState* state,
const THCDeviceTensor<T, 4>& gradOutput,
const THCDeviceTensor<T, 4>& input,
const THCDeviceTensor<T, 4>& output,
THCDeviceTensor<T, 4>& gradInput,
float power, int width, int stride) {
cudaStream_t stream =
THCState_getCurrentStream(state);
const cudaDeviceProp* deviceProperties =
THCState_getCurrentDeviceProperties(state);
for (int i = 0; i < 4; ++i) {
THAssert(gradOutput.getSize(i) == output.getSize(i));
THAssert(gradInput.getSize(i) == input.getSize(i));
}
int outputFeatures = ((input.getSize(1) - width) / stride) + 1;
THAssert(gradInput.getSize(0) == gradOutput.getSize(0));
THAssert(outputFeatures == gradOutput.getSize(1));
THAssert(gradInput.getSize(1) >= width);
THAssert(gradInput.getSize(2) == gradOutput.getSize(2));
THAssert(gradInput.getSize(3) == gradOutput.getSize(3));
THAssert(power > 0.0f);
THAssert(width >= 1);
THAssert(stride >= 1);
// Different threads are potentially adding into overlapping input
// points, so we must clear out gradInput before continuing.
gradInput.zero(stream);
// Split non-features among threads and grid x
int totalNonFeatureSize = input.getSize(2) * input.getSize(3);
int numWarps =
min(THCCeilDiv(totalNonFeatureSize, deviceProperties->warpSize),
MAX_WARPS_PER_RUN);
int blockSize = deviceProperties->warpSize * numWarps;
// Split non-features among grid x
int nonFeatureSizeBlocks = THCCeilDiv(totalNonFeatureSize, blockSize);
// Split features among grid y, up to a maximum number of features per thread
int featureBlocks = THCCeilDiv(outputFeatures, OUTPUT_FEATURES_PER_THREAD);
// Split batch among grid z.
dim3 grid(nonFeatureSizeBlocks, featureBlocks, input.getSize(0));
dim3 block(blockSize);
#define L2_STRIDE_CASE(STRIDE, WIDTH) \
case STRIDE: \
detail:: \
featureLPPoolingUpdateGradInput< \
T, WIDTH, STRIDE, detail::powerGrad2><<<grid, block, 0, stream>>>( \
gradOutput, input, output, gradInput, \
ScalarConvert<float, T>::to(power)); \
return true;
#define L2_WIDTH_CASE(WIDTH) \
case WIDTH: \
switch (stride) { \
L2_STRIDE_CASE(1, WIDTH); \
L2_STRIDE_CASE(2, WIDTH); \
L2_STRIDE_CASE(3, WIDTH); \
L2_STRIDE_CASE(4, WIDTH); \
}
#define LP_STRIDE_CASE(STRIDE, WIDTH) \
case STRIDE: \
detail:: \
featureLPPoolingUpdateGradInput< \
T, WIDTH, STRIDE, detail::powerGradN><<<grid, block, 0, stream>>>( \
gradOutput, input, output, gradInput, \
ScalarConvert<float, T>::to(power)); \
return true;
#define LP_WIDTH_CASE(WIDTH) \
case WIDTH: \
switch (stride) { \
LP_STRIDE_CASE(1, WIDTH); \
LP_STRIDE_CASE(2, WIDTH); \
LP_STRIDE_CASE(3, WIDTH); \
LP_STRIDE_CASE(4, WIDTH); \
}
if (power == 2.0f) {
switch (width) {
L2_WIDTH_CASE(2);
L2_WIDTH_CASE(3);
L2_WIDTH_CASE(4);
L2_WIDTH_CASE(5);
L2_WIDTH_CASE(6);
L2_WIDTH_CASE(7);
L2_WIDTH_CASE(8);
L2_WIDTH_CASE(9);
L2_WIDTH_CASE(10);
L2_WIDTH_CASE(11);
L2_WIDTH_CASE(12);
L2_WIDTH_CASE(13);
L2_WIDTH_CASE(14);
L2_WIDTH_CASE(15);
L2_WIDTH_CASE(16);
}
} else {
switch (width) {
LP_WIDTH_CASE(2);
LP_WIDTH_CASE(3);
LP_WIDTH_CASE(4);
LP_WIDTH_CASE(5);
LP_WIDTH_CASE(6);
LP_WIDTH_CASE(7);
LP_WIDTH_CASE(8);
LP_WIDTH_CASE(9);
LP_WIDTH_CASE(10);
LP_WIDTH_CASE(11);
LP_WIDTH_CASE(12);
LP_WIDTH_CASE(13);
LP_WIDTH_CASE(14);
LP_WIDTH_CASE(15);
LP_WIDTH_CASE(16);
}
}
// Otherwise, we have an unhandled width and/or stride.
return false;
#undef L2_STRIDE_CASE
#undef L2_WIDTH_CASE
#undef LP_STRIDE_CASE
#undef LP_WIDTH_CASE
}
#include "generic/FeatureLPPooling.cu"
#include "THCGenerateFloatTypes.h"
|
359ecd604cffd9c347f5a2e9f0a76c093c9de1cc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/core/Array.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <c10/hip/HIPMathCompat.h>
namespace at {
namespace native {
// -----------------------------------
// prelu forward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data)
{
auto iter = TensorIterator::unary_op(result, input);
at::native::gpu_kernel(iter,
[weight_data] GPU_LAMBDA (scalar_t input_val) {
return (input_val > 0) ? input_val : *weight_data * input_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
// multiply values at each channel with weight[channel_index]
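  // For a contiguous NCHW input, input_stride0 == C*H*W and input_stride1 == H*W,
  // so this recovers the channel index from the flat element index.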
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data_ptr<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
result.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(input_grad)
.add_output(weight_grad_collector)
.add_input(input)
.add_input(grad_out)
.build();
// N.B. `std::tuple` does not support `::operator=` on device code.
gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> {
scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out;
scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out;
return {input_grad, weight_grad_collector};
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(grad_out_.is_cuda());
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
TORCH_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data_ptr<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_backward_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
weight_grad_collector.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// hardshrink
// -----------------------------------
void hardshrink_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
void softshrink_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
}
void shrink_backward_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
});
}
void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
auto min_val = min.to<scalar_t>();
auto max_val = max.to<scalar_t>();
gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
});
});
}
void softplus_kernel(TensorIterator& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(::exp(a * beta))) / beta;
});
});
}
void softplus_backward_kernel(TensorIterator& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t z = ::exp(b * beta);
return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.));
});
});
}
template <typename scalar_t>
void threshold_kernel_impl(TensorIteratorBase& iter, scalar_t threshold, scalar_t value) {
gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel_cuda(TensorIteratorBase& iter, const Scalar& threshold, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
void elu_kernel(TensorIterator& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a * poscoef : (static_cast<scalar_t>(::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
});
});
}
void elu_backward_kernel(TensorIterator& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
if (is_result) {
return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
} else {
return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(::exp(b * negiptcoef))) : a * poscoef;
}
});
});
}
namespace {
void GeluCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
return static_cast<T_ACC>(x) *
c10::hip::compat::normcdf(static_cast<T_ACC>(x));
});
});
}
void GeluBackwardCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
const T_ACC cdf = c10::hip::compat::normcdf(static_cast<T_ACC>(x));
const T_ACC pdf =
c10::hip::compat::exp(
T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) *
kBeta;
return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
});
});
}
void leaky_relu_kernel(TensorIterator& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a : a * negval;
});
});
}
void leaky_relu_backward_kernel(TensorIterator& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a > scalar_t(0) ? b : b * negval;
});
});
}
void hardswish_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return x * ::min(::max(x + three, zero), six) * one_sixth;
});
});
}
void hardswish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_half(0.5f);
gpu_kernel(
iter,
[zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
if (self_val < neg_three) {
return zero;
} else if (self_val <= three) {
return grad_val * ((self_val / three) + one_half);
} else {
return grad_val;
}
});
});
}
void hardsigmoid_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return ::min(::max(x + three, zero), six) * one_sixth;
});
});
}
void hardsigmoid_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_sixth(1.0f / 6.0f);
gpu_kernel(
iter,
[zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
return (self_val > neg_three && self_val < three)
? grad_val * one_sixth
: zero;
});
});
}
void silu_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc / (T_ACC(1) + c10::hip::compat::exp(-x_acc));
});
});
}
void silu_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::hip::compat::exp(-x_acc));
return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc));
});
});
}
} // namespace
Tensor gelu_cuda(const Tensor& self) {
Tensor Y = at::native::empty_like(
self,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::unary_op(Y, self);
GeluCUDAKernelImpl(it);
return Y;
}
Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) {
Tensor dX = at::native::empty_like(
self,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::binary_op(dX, grad, self);
GeluBackwardCUDAKernelImpl(it);
return dX;
}
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
REGISTER_DISPATCH(silu_stub, &silu_kernel);
REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda);
} // namespace native
} // namespace at
|
359ecd604cffd9c347f5a2e9f0a76c093c9de1cc.cu
|
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/core/Array.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <c10/cuda/CUDAMathCompat.h>
namespace at {
namespace native {
// -----------------------------------
// prelu forward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data)
{
auto iter = TensorIterator::unary_op(result, input);
at::native::gpu_kernel(iter,
[weight_data] GPU_LAMBDA (scalar_t input_val) {
return (input_val > 0) ? input_val : *weight_data * input_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
// multiply values at each channel with weight[channel_index]
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data_ptr<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
result.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(input_grad)
.add_output(weight_grad_collector)
.add_input(input)
.add_input(grad_out)
.build();
// N.B. `std::tuple` does not support `::operator=` on device code.
gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> {
scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out;
scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out;
return {input_grad, weight_grad_collector};
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(grad_out_.is_cuda());
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
TORCH_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data_ptr<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
weight_grad_collector.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// hardshrink
// -----------------------------------
void hardshrink_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
void softshrink_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
}
void shrink_backward_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
});
}
void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
auto min_val = min.to<scalar_t>();
auto max_val = max.to<scalar_t>();
gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
});
});
}
void softplus_kernel(TensorIterator& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
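      // For numerical stability: once a * beta exceeds the threshold, softplus
      // is indistinguishable from the identity, so return a directly and avoid
      // overflow in exp().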
return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(std::exp(a * beta))) / beta;
});
});
}
void softplus_backward_kernel(TensorIterator& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t z = std::exp(b * beta);
return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.));
});
});
}
template <typename scalar_t>
void threshold_kernel_impl(TensorIteratorBase& iter, scalar_t threshold, scalar_t value) {
gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel_cuda(TensorIteratorBase& iter, const Scalar& threshold, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
void elu_kernel(TensorIterator& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a * poscoef : (static_cast<scalar_t>(std::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
});
});
}
void elu_backward_kernel(TensorIterator& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
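      // When is_result is true, b is the saved ELU output: for the negative
      // branch the forward pass gives b = negcoef * (exp(x * negiptcoef) - 1),
      // so negcoef * exp(x * negiptcoef) is recovered as b + negcoef.
      // Otherwise b is the original input and the exponential is recomputed.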
if (is_result) {
return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
} else {
return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(std::exp(b * negiptcoef))) : a * poscoef;
}
});
});
}
namespace {
void GeluCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
return static_cast<T_ACC>(x) *
c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
});
});
}
void GeluBackwardCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
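      // kBeta = (2/sqrt(pi)) * (1/sqrt(2)) * 0.5 = 1/sqrt(2*pi), the standard
      // normal pdf constant: d/dx [x * Phi(x)] = Phi(x) + x * phi(x).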
constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
const T_ACC cdf = c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
const T_ACC pdf =
c10::cuda::compat::exp(
T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) *
kBeta;
return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
});
});
}
void leaky_relu_kernel(TensorIterator& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a : a * negval;
});
});
}
void leaky_relu_backward_kernel(TensorIterator& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a > scalar_t(0) ? b : b * negval;
});
});
}
void hardswish_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return x * std::min(std::max(x + three, zero), six) * one_sixth;
});
});
}
void hardswish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_half(0.5f);
gpu_kernel(
iter,
[zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
if (self_val < neg_three) {
return zero;
} else if (self_val <= three) {
return grad_val * ((self_val / three) + one_half);
} else {
return grad_val;
}
});
});
}
void hardsigmoid_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return std::min(std::max(x + three, zero), six) * one_sixth;
});
});
}
void hardsigmoid_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_sixth(1.0f / 6.0f);
gpu_kernel(
iter,
[zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
return (self_val > neg_three && self_val < three)
? grad_val * one_sixth
: zero;
});
});
}
void silu_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
});
});
}
void silu_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
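            // d/dx [x * sigmoid(x)] = s + x * s * (1 - s) = s * (1 + x * (1 - s)),
            // with s = sigmoid(x) evaluated in the accumulation type.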
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc));
});
});
}
} // namespace
Tensor gelu_cuda(const Tensor& self) {
Tensor Y = at::native::empty_like(
self,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::unary_op(Y, self);
GeluCUDAKernelImpl(it);
return Y;
}
Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) {
Tensor dX = at::native::empty_like(
self,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::binary_op(dX, grad, self);
GeluBackwardCUDAKernelImpl(it);
return dX;
}
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
REGISTER_DISPATCH(silu_stub, &silu_kernel);
REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda);
} // namespace native
} // namespace at
|
b921f968e718a55c2c610b42acdb2b5f169dfc46.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
struct compressed_stream_s {
CompressedStreamInfo info;
gpu_inflate_input_s ctl;
};
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData(
CompressedStreamInfo *strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr)
{
__shared__ compressed_stream_s strm_g[4];
compressed_stream_s *const s = &strm_g[threadIdx.x / 32];
int strm_id = blockIdx.x * 4 + (threadIdx.x / 32);
int lane_id = threadIdx.x % 32;
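  // One warp per stream: each 128-thread block covers four streams, with lane 0
  // of every warp doing the bookkeeping and broadcasting results via shuffle.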
if (lane_id == 0) { s->info = strm_info[strm_id]; }
__syncthreads();
if (strm_id < num_streams) {
// Walk through the compressed blocks
const uint8_t *cur = s->info.compressed_data;
const uint8_t *end = cur + s->info.compressed_data_size;
uint8_t *uncompressed = s->info.uncompressed_data;
size_t max_uncompressed_size = 0;
uint32_t num_compressed_blocks = 0;
uint32_t num_uncompressed_blocks = 0;
while (cur + 3 < end) {
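      // Each ORC compression block begins with a 3-byte little-endian header:
      // the low bit flags an uncompressed ("original") block and the remaining
      // bits hold the block length in bytes.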
uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size;
gpu_inflate_input_s *init_ctl = nullptr;
block_len >>= 1;
cur += 3;
if (block_len > block_size || cur + block_len > end) {
// Fatal
num_compressed_blocks = 0;
max_uncompressed_size = 0;
break;
}
// TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual
      // uncompressed size and avoid waste due to block size alignment. For now, rely on the max
// compression ratio to limit waste for the most extreme cases (small single-block streams)
uncompressed_size =
(is_uncompressed)
? block_len
: (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size;
if (is_uncompressed) {
if (uncompressed_size <= 32) {
// For short blocks, copy the uncompressed data to output
if (uncompressed &&
max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size &&
lane_id < uncompressed_size) {
uncompressed[max_uncompressed_size + lane_id] = cur[lane_id];
}
} else {
init_ctl = s->info.copyctl;
init_ctl = (init_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks)
? &init_ctl[num_uncompressed_blocks]
: nullptr;
num_uncompressed_blocks++;
}
} else {
init_ctl = s->info.decctl;
init_ctl = (init_ctl && num_compressed_blocks < s->info.num_compressed_blocks)
? &init_ctl[num_compressed_blocks]
: nullptr;
num_compressed_blocks++;
}
if (!lane_id && init_ctl) {
s->ctl.srcDevice = const_cast<uint8_t *>(cur);
s->ctl.srcSize = block_len;
s->ctl.dstDevice = uncompressed + max_uncompressed_size;
s->ctl.dstSize = uncompressed_size;
}
__syncwarp();
if (init_ctl && lane_id == 0) *init_ctl = s->ctl;
cur += block_len;
max_uncompressed_size += uncompressed_size;
}
__syncwarp();
if (!lane_id) {
s->info.num_compressed_blocks = num_compressed_blocks;
s->info.num_uncompressed_blocks = num_uncompressed_blocks;
s->info.max_uncompressed_size = max_uncompressed_size;
}
}
__syncthreads();
if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info;
}
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuPostDecompressionReassemble(CompressedStreamInfo *strm_info, int32_t num_streams)
{
__shared__ compressed_stream_s strm_g[4];
compressed_stream_s *const s = &strm_g[threadIdx.x / 32];
int strm_id = blockIdx.x * 4 + (threadIdx.x / 32);
int lane_id = threadIdx.x % 32;
if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id];
__syncthreads();
if (strm_id < num_streams &&
s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 &&
s->info.max_uncompressed_size > 0) {
// Walk through the compressed blocks
const uint8_t *cur = s->info.compressed_data;
const uint8_t *end = cur + s->info.compressed_data_size;
const gpu_inflate_input_s *dec_in = s->info.decctl;
const gpu_inflate_status_s *dec_out = s->info.decstatus;
uint8_t *uncompressed_actual = s->info.uncompressed_data;
uint8_t *uncompressed_estimated = uncompressed_actual;
uint32_t num_compressed_blocks = 0;
uint32_t max_compressed_blocks = s->info.num_compressed_blocks;
while (cur + 3 < end) {
uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size_est, uncompressed_size_actual;
block_len >>= 1;
cur += 3;
if (cur + block_len > end) { break; }
if (is_uncompressed) {
uncompressed_size_est = block_len;
uncompressed_size_actual = block_len;
} else {
if (num_compressed_blocks > max_compressed_blocks) { break; }
if (shuffle((lane_id == 0) ? dec_out[num_compressed_blocks].status : 0) != 0) {
// Decompression failed, not much point in doing anything else
break;
}
uncompressed_size_est =
shuffle((lane_id == 0) ? *(const uint32_t *)&dec_in[num_compressed_blocks].dstSize : 0);
uncompressed_size_actual = shuffle(
(lane_id == 0) ? *(const uint32_t *)&dec_out[num_compressed_blocks].bytes_written : 0);
}
// In practice, this should never happen with a well-behaved writer, as we would expect the
// uncompressed size to always be equal to the compression block size except for the last
// block
if (uncompressed_actual < uncompressed_estimated) {
// warp-level memmove
for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) {
uncompressed_actual[i] = uncompressed_estimated[i];
}
}
cur += block_len;
num_compressed_blocks += 1 - is_uncompressed;
uncompressed_estimated += uncompressed_size_est;
uncompressed_actual += uncompressed_size_actual;
}
// Update info with actual uncompressed size
if (!lane_id) {
size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data;
// Set uncompressed size to zero if there were any errors
strm_info[strm_id].max_uncompressed_size =
(num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0;
}
}
}
/**
* @brief Shared mem state for gpuParseRowGroupIndex
*
*/
struct rowindex_state_s {
ColumnDesc chunk;
uint32_t rowgroup_start;
uint32_t rowgroup_end;
int is_compressed;
uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2
CompressedStreamInfo strm_info[2];
RowGroup rowgroups[128];
uint32_t compressed_offset[128][2];
};
enum row_entry_state_e {
NOT_FOUND = 0,
GET_LENGTH,
SKIP_VARINT,
SKIP_FIXEDLEN,
STORE_INDEX0,
STORE_INDEX1,
STORE_INDEX2,
};
/**
* @brief Decode a single row group index entry
*
* @param[in,out] s row group index state
* @param[in] start start position in byte stream
* @param[in] end end of byte stream
* @return bytes consumed
*
**/
static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s *s,
const uint8_t *start,
const uint8_t *end)
{
constexpr uint32_t pb_rowindexentry_id = static_cast<uint32_t>(PB_TYPE_FIXEDLEN) + 8;
const uint8_t *cur = start;
row_entry_state_e state = NOT_FOUND;
uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT,
pos_end = 0;
while (cur < end) {
uint32_t v = 0;
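// Decode a protobuf base-128 varint (up to 5 bytes / 32 bits): each byte carries 7 payload bits
// and a set high bit means another byte follows.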
for (uint32_t l = 0; l <= 28; l += 7) {
uint32_t c = (cur < end) ? *cur++ : 0;
v |= (c & 0x7f) << l;
if (c <= 0x7f) break;
}
switch (state) {
case NOT_FOUND:
if (v == pb_rowindexentry_id) {
state = GET_LENGTH;
} else {
v &= 7;
if (v == PB_TYPE_FIXED64)
cur += 8;
else if (v == PB_TYPE_FIXED32)
cur += 4;
else if (v == PB_TYPE_VARINT)
state = SKIP_VARINT;
else if (v == PB_TYPE_FIXEDLEN)
state = SKIP_FIXEDLEN;
}
break;
case SKIP_VARINT: state = NOT_FOUND; break;
case SKIP_FIXEDLEN:
cur += v;
state = NOT_FOUND;
break;
case GET_LENGTH:
if (length == 0) {
length = (uint32_t)(cur + v - start);
state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry
// entry)
} else {
pos_end = min((uint32_t)(cur + v - start), length);
state = STORE_INDEX0;
}
break;
case STORE_INDEX0:
ci_id = (idx_id == (strm_idx_id & 0xff))
? CI_DATA
: (idx_id == ((strm_idx_id >> 8) & 0xff)) ? CI_DATA2 : CI_PRESENT;
idx_id++;
if (s->is_compressed) {
if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v;
if (cur >= start + pos_end) return length;
state = STORE_INDEX1;
break;
} else {
if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0;
// Fall through to STORE_INDEX1 for uncompressed (always block0)
}
case STORE_INDEX1:
if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v;
if (cur >= start + pos_end) return length;
state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY &&
s->chunk.encoding_kind != DICTIONARY_V2 &&
(s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY ||
s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR ||
s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT ||
s->chunk.type_kind == DOUBLE))
? STORE_INDEX0
: STORE_INDEX2;
break;
case STORE_INDEX2:
if (ci_id < CI_PRESENT) {
// Boolean columns have an extra byte to indicate the position of the bit within the byte
s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? (v << 3) + *cur : v;
}
if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++;
if (cur >= start + pos_end) return length;
state = STORE_INDEX0;
break;
}
}
return (uint32_t)(end - start);
}
/**
* @brief Decode row group index entries
*
* @param[in,out] s row group index state
* @param[in] num_rowgroups Number of index entries to read
*
**/
static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s *s, int num_rowgroups)
{
const uint8_t *index_data = s->chunk.streams[CI_INDEX];
int index_data_len = s->chunk.strm_len[CI_INDEX];
for (int i = 0; i < num_rowgroups; i++) {
s->row_index_entry[0][0] = 0;
s->row_index_entry[0][1] = 0;
s->row_index_entry[1][0] = 0;
s->row_index_entry[1][1] = 0;
s->row_index_entry[2][0] = 0;
s->row_index_entry[2][1] = 0;
if (index_data_len > 0) {
int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len);
index_data += len;
index_data_len = max(index_data_len - len, 0);
for (int j = 0; j < 2; j++) {
s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j];
s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j];
s->compressed_offset[i][j] = s->row_index_entry[0][j];
}
}
}
s->chunk.streams[CI_INDEX] = index_data;
s->chunk.strm_len[CI_INDEX] = index_data_len;
}
/**
* @brief Translate block+offset compressed position into an uncompressed offset
*
* @param[in,out] s row group index state
* @param[in] ci_id index to convert (CI_DATA or CI_DATA2)
* @param[in] num_rowgroups Number of index entries
* @param[in] t thread id
*
**/
static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s *s,
int ci_id,
int num_rowgroups,
int t)
{
int32_t strm_len = s->chunk.strm_len[ci_id];
if (strm_len > 0) {
int32_t compressed_offset = (t < num_rowgroups) ? s->compressed_offset[t][ci_id] : 0;
if (compressed_offset > 0) {
const uint8_t *start = s->strm_info[ci_id].compressed_data;
const uint8_t *cur = start;
const uint8_t *end = cur + s->strm_info[ci_id].compressed_data_size;
gpu_inflate_status_s *decstatus = s->strm_info[ci_id].decstatus;
uint32_t uncomp_offset = 0;
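// Walk the compression blocks preceding compressed_offset and sum their decompressed sizes,
// turning the block-relative row index position into an offset into the decompressed stream.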
for (;;) {
uint32_t block_len, is_uncompressed;
if (cur + 3 > end || cur + 3 >= start + compressed_offset) { break; }
block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16);
cur += 3;
is_uncompressed = block_len & 1;
block_len >>= 1;
cur += block_len;
if (cur > end) { break; }
if (is_uncompressed) {
uncomp_offset += block_len;
} else {
uncomp_offset += decstatus->bytes_written;
decstatus++;
}
}
s->rowgroups[t].strm_offset[ci_id] += uncomp_offset;
}
}
}
/**
* @brief Decode index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
*
**/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuParseRowGroupIndex(RowGroup *row_groups,
CompressedStreamInfo *strm_info,
ColumnDesc *chunks,
uint32_t num_columns,
uint32_t num_stripes,
uint32_t num_rowgroups,
uint32_t rowidx_stride)
{
__shared__ __align__(16) rowindex_state_s state_g;
rowindex_state_s *const s = &state_g;
uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[chunk_id];
if (strm_info) {
if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]];
if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]];
}
uint32_t rowgroups_in_chunk =
(rowidx_stride > 0) ? (s->chunk.num_rows + rowidx_stride - 1) / rowidx_stride : 1;
s->rowgroup_start = s->chunk.rowgroup_id;
s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk;
s->is_compressed = (strm_info != NULL);
}
__syncthreads();
while (s->rowgroup_start < s->rowgroup_end) {
int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128);
int rowgroup_size4, t4, t32;
s->rowgroups[t].chunk_id = chunk_id;
if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); }
__syncthreads();
if (s->is_compressed) {
// Convert the block + blk_offset pair into a raw offset into the decompressed stream
if (s->chunk.strm_len[CI_DATA] > 0) {
gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t);
}
if (s->chunk.strm_len[CI_DATA2] > 0) {
gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t);
}
__syncthreads();
}
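// Copy the rowgroup entries from shared memory to global memory, 4 threads per rowgroup,
// one 32-bit word per thread per iteration.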
rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t);
t4 = t & 3;
t32 = t >> 2;
for (int i = t32; i < num_rowgroups; i += 32) {
for (int j = t4; j < rowgroup_size4; j += 4) {
((uint32_t *)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] =
((volatile uint32_t *)&s->rowgroups[i])[j];
}
}
__syncthreads();
if (t == 0) { s->rowgroup_start += num_rowgroups; }
__syncthreads();
}
}
void __host__ ParseCompressedStripeData(CompressedStreamInfo *strm_info,
int32_t num_streams,
uint32_t compression_block_size,
uint32_t log2maxcr,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
hipLaunchKernelGGL(( gpuParseCompressedStripeData), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
strm_info, num_streams, compression_block_size, log2maxcr);
}
void __host__ PostDecompressionReassemble(CompressedStreamInfo *strm_info,
int32_t num_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
hipLaunchKernelGGL(( gpuPostDecompressionReassemble), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_info,
num_streams);
}
/**
* @brief Launches kernel for constructing rowgroup from index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
* @param[in] stream CUDA stream to use, default 0
*/
void __host__ ParseRowGroupIndex(RowGroup *row_groups,
CompressedStreamInfo *strm_info,
ColumnDesc *chunks,
uint32_t num_columns,
uint32_t num_stripes,
uint32_t num_rowgroups,
uint32_t rowidx_stride,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block
hipLaunchKernelGGL(( gpuParseRowGroupIndex), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
b921f968e718a55c2c610b42acdb2b5f169dfc46.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
struct compressed_stream_s {
CompressedStreamInfo info;
gpu_inflate_input_s ctl;
};
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData(
CompressedStreamInfo *strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr)
{
__shared__ compressed_stream_s strm_g[4];
compressed_stream_s *const s = &strm_g[threadIdx.x / 32];
int strm_id = blockIdx.x * 4 + (threadIdx.x / 32);
int lane_id = threadIdx.x % 32;
if (lane_id == 0) { s->info = strm_info[strm_id]; }
__syncthreads();
if (strm_id < num_streams) {
// Walk through the compressed blocks
const uint8_t *cur = s->info.compressed_data;
const uint8_t *end = cur + s->info.compressed_data_size;
uint8_t *uncompressed = s->info.uncompressed_data;
size_t max_uncompressed_size = 0;
uint32_t num_compressed_blocks = 0;
uint32_t num_uncompressed_blocks = 0;
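// Each ORC compression block starts with a 3-byte little-endian header: the low bit flags an
// uncompressed ("original") block and the remaining 23 bits give the block length in bytes.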
while (cur + 3 < end) {
uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size;
gpu_inflate_input_s *init_ctl = nullptr;
block_len >>= 1;
cur += 3;
if (block_len > block_size || cur + block_len > end) {
// Fatal
num_compressed_blocks = 0;
max_uncompressed_size = 0;
break;
}
// TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual
// uncompressed size and avoid waste due to block size alignment. For now, rely on the max
// compression ratio to limit waste for the most extreme cases (small single-block streams)
uncompressed_size =
(is_uncompressed)
? block_len
: (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size;
if (is_uncompressed) {
if (uncompressed_size <= 32) {
// For short blocks, copy the uncompressed data to output
if (uncompressed &&
max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size &&
lane_id < uncompressed_size) {
uncompressed[max_uncompressed_size + lane_id] = cur[lane_id];
}
} else {
init_ctl = s->info.copyctl;
init_ctl = (init_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks)
? &init_ctl[num_uncompressed_blocks]
: nullptr;
num_uncompressed_blocks++;
}
} else {
init_ctl = s->info.decctl;
init_ctl = (init_ctl && num_compressed_blocks < s->info.num_compressed_blocks)
? &init_ctl[num_compressed_blocks]
: nullptr;
num_compressed_blocks++;
}
if (!lane_id && init_ctl) {
s->ctl.srcDevice = const_cast<uint8_t *>(cur);
s->ctl.srcSize = block_len;
s->ctl.dstDevice = uncompressed + max_uncompressed_size;
s->ctl.dstSize = uncompressed_size;
}
__syncwarp();
if (init_ctl && lane_id == 0) *init_ctl = s->ctl;
cur += block_len;
max_uncompressed_size += uncompressed_size;
}
__syncwarp();
if (!lane_id) {
s->info.num_compressed_blocks = num_compressed_blocks;
s->info.num_uncompressed_blocks = num_uncompressed_blocks;
s->info.max_uncompressed_size = max_uncompressed_size;
}
}
__syncthreads();
if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info;
}
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuPostDecompressionReassemble(CompressedStreamInfo *strm_info, int32_t num_streams)
{
__shared__ compressed_stream_s strm_g[4];
compressed_stream_s *const s = &strm_g[threadIdx.x / 32];
int strm_id = blockIdx.x * 4 + (threadIdx.x / 32);
int lane_id = threadIdx.x % 32;
if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id];
__syncthreads();
if (strm_id < num_streams &&
s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 &&
s->info.max_uncompressed_size > 0) {
// Walk through the compressed blocks
const uint8_t *cur = s->info.compressed_data;
const uint8_t *end = cur + s->info.compressed_data_size;
const gpu_inflate_input_s *dec_in = s->info.decctl;
const gpu_inflate_status_s *dec_out = s->info.decstatus;
uint8_t *uncompressed_actual = s->info.uncompressed_data;
uint8_t *uncompressed_estimated = uncompressed_actual;
uint32_t num_compressed_blocks = 0;
uint32_t max_compressed_blocks = s->info.num_compressed_blocks;
while (cur + 3 < end) {
uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size_est, uncompressed_size_actual;
block_len >>= 1;
cur += 3;
if (cur + block_len > end) { break; }
if (is_uncompressed) {
uncompressed_size_est = block_len;
uncompressed_size_actual = block_len;
} else {
if (num_compressed_blocks > max_compressed_blocks) { break; }
if (shuffle((lane_id == 0) ? dec_out[num_compressed_blocks].status : 0) != 0) {
// Decompression failed, not much point in doing anything else
break;
}
uncompressed_size_est =
shuffle((lane_id == 0) ? *(const uint32_t *)&dec_in[num_compressed_blocks].dstSize : 0);
uncompressed_size_actual = shuffle(
(lane_id == 0) ? *(const uint32_t *)&dec_out[num_compressed_blocks].bytes_written : 0);
}
// In practice, this should never happen with a well-behaved writer, as we would expect the
// uncompressed size to always be equal to the compression block size except for the last
// block
if (uncompressed_actual < uncompressed_estimated) {
// warp-level memmove
for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) {
uncompressed_actual[i] = uncompressed_estimated[i];
}
}
cur += block_len;
num_compressed_blocks += 1 - is_uncompressed;
uncompressed_estimated += uncompressed_size_est;
uncompressed_actual += uncompressed_size_actual;
}
// Update info with actual uncompressed size
if (!lane_id) {
size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data;
// Set uncompressed size to zero if there were any errors
strm_info[strm_id].max_uncompressed_size =
(num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0;
}
}
}
/**
* @brief Shared mem state for gpuParseRowGroupIndex
*
*/
struct rowindex_state_s {
ColumnDesc chunk;
uint32_t rowgroup_start;
uint32_t rowgroup_end;
int is_compressed;
uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2
CompressedStreamInfo strm_info[2];
RowGroup rowgroups[128];
uint32_t compressed_offset[128][2];
};
enum row_entry_state_e {
NOT_FOUND = 0,
GET_LENGTH,
SKIP_VARINT,
SKIP_FIXEDLEN,
STORE_INDEX0,
STORE_INDEX1,
STORE_INDEX2,
};
/**
* @brief Decode a single row group index entry
*
* @param[in,out] s row group index state
* @param[in] start start position in byte stream
* @param[in] end end of byte stream
* @return bytes consumed
*
**/
static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s *s,
const uint8_t *start,
const uint8_t *end)
{
constexpr uint32_t pb_rowindexentry_id = static_cast<uint32_t>(PB_TYPE_FIXEDLEN) + 8;
const uint8_t *cur = start;
row_entry_state_e state = NOT_FOUND;
uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT,
pos_end = 0;
while (cur < end) {
uint32_t v = 0;
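// Decode a protobuf base-128 varint (up to 5 bytes / 32 bits): each byte carries 7 payload bits
// and a set high bit means another byte follows.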
for (uint32_t l = 0; l <= 28; l += 7) {
uint32_t c = (cur < end) ? *cur++ : 0;
v |= (c & 0x7f) << l;
if (c <= 0x7f) break;
}
switch (state) {
case NOT_FOUND:
if (v == pb_rowindexentry_id) {
state = GET_LENGTH;
} else {
v &= 7;
if (v == PB_TYPE_FIXED64)
cur += 8;
else if (v == PB_TYPE_FIXED32)
cur += 4;
else if (v == PB_TYPE_VARINT)
state = SKIP_VARINT;
else if (v == PB_TYPE_FIXEDLEN)
state = SKIP_FIXEDLEN;
}
break;
case SKIP_VARINT: state = NOT_FOUND; break;
case SKIP_FIXEDLEN:
cur += v;
state = NOT_FOUND;
break;
case GET_LENGTH:
if (length == 0) {
length = (uint32_t)(cur + v - start);
state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry
// entry)
} else {
pos_end = min((uint32_t)(cur + v - start), length);
state = STORE_INDEX0;
}
break;
case STORE_INDEX0:
ci_id = (idx_id == (strm_idx_id & 0xff))
? CI_DATA
: (idx_id == ((strm_idx_id >> 8) & 0xff)) ? CI_DATA2 : CI_PRESENT;
idx_id++;
if (s->is_compressed) {
if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v;
if (cur >= start + pos_end) return length;
state = STORE_INDEX1;
break;
} else {
if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0;
// Fall through to STORE_INDEX1 for uncompressed (always block0)
}
case STORE_INDEX1:
if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v;
if (cur >= start + pos_end) return length;
state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY &&
s->chunk.encoding_kind != DICTIONARY_V2 &&
(s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY ||
s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR ||
s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT ||
s->chunk.type_kind == DOUBLE))
? STORE_INDEX0
: STORE_INDEX2;
break;
case STORE_INDEX2:
if (ci_id < CI_PRESENT) {
// Boolean columns have an extra byte to indicate the position of the bit within the byte
s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? (v << 3) + *cur : v;
}
if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++;
if (cur >= start + pos_end) return length;
state = STORE_INDEX0;
break;
}
}
return (uint32_t)(end - start);
}
/**
* @brief Decode row group index entries
*
* @param[in,out] s row group index state
* @param[in] num_rowgroups Number of index entries to read
*
**/
static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s *s, int num_rowgroups)
{
const uint8_t *index_data = s->chunk.streams[CI_INDEX];
int index_data_len = s->chunk.strm_len[CI_INDEX];
for (int i = 0; i < num_rowgroups; i++) {
s->row_index_entry[0][0] = 0;
s->row_index_entry[0][1] = 0;
s->row_index_entry[1][0] = 0;
s->row_index_entry[1][1] = 0;
s->row_index_entry[2][0] = 0;
s->row_index_entry[2][1] = 0;
if (index_data_len > 0) {
int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len);
index_data += len;
index_data_len = max(index_data_len - len, 0);
for (int j = 0; j < 2; j++) {
s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j];
s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j];
s->compressed_offset[i][j] = s->row_index_entry[0][j];
}
}
}
s->chunk.streams[CI_INDEX] = index_data;
s->chunk.strm_len[CI_INDEX] = index_data_len;
}
/**
* @brief Translate block+offset compressed position into an uncompressed offset
*
* @param[in,out] s row group index state
* @param[in] ci_id index to convert (CI_DATA or CI_DATA2)
* @param[in] num_rowgroups Number of index entries
* @param[in] t thread id
*
**/
static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s *s,
int ci_id,
int num_rowgroups,
int t)
{
int32_t strm_len = s->chunk.strm_len[ci_id];
if (strm_len > 0) {
int32_t compressed_offset = (t < num_rowgroups) ? s->compressed_offset[t][ci_id] : 0;
if (compressed_offset > 0) {
const uint8_t *start = s->strm_info[ci_id].compressed_data;
const uint8_t *cur = start;
const uint8_t *end = cur + s->strm_info[ci_id].compressed_data_size;
gpu_inflate_status_s *decstatus = s->strm_info[ci_id].decstatus;
uint32_t uncomp_offset = 0;
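// Walk the compression blocks preceding compressed_offset and sum their decompressed sizes,
// turning the block-relative row index position into an offset into the decompressed stream.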
for (;;) {
uint32_t block_len, is_uncompressed;
if (cur + 3 > end || cur + 3 >= start + compressed_offset) { break; }
block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16);
cur += 3;
is_uncompressed = block_len & 1;
block_len >>= 1;
cur += block_len;
if (cur > end) { break; }
if (is_uncompressed) {
uncomp_offset += block_len;
} else {
uncomp_offset += decstatus->bytes_written;
decstatus++;
}
}
s->rowgroups[t].strm_offset[ci_id] += uncomp_offset;
}
}
}
/**
* @brief Decode index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
*
**/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuParseRowGroupIndex(RowGroup *row_groups,
CompressedStreamInfo *strm_info,
ColumnDesc *chunks,
uint32_t num_columns,
uint32_t num_stripes,
uint32_t num_rowgroups,
uint32_t rowidx_stride)
{
__shared__ __align__(16) rowindex_state_s state_g;
rowindex_state_s *const s = &state_g;
uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[chunk_id];
if (strm_info) {
if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]];
if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]];
}
uint32_t rowgroups_in_chunk =
(rowidx_stride > 0) ? (s->chunk.num_rows + rowidx_stride - 1) / rowidx_stride : 1;
s->rowgroup_start = s->chunk.rowgroup_id;
s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk;
s->is_compressed = (strm_info != NULL);
}
__syncthreads();
while (s->rowgroup_start < s->rowgroup_end) {
int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128);
int rowgroup_size4, t4, t32;
s->rowgroups[t].chunk_id = chunk_id;
if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); }
__syncthreads();
if (s->is_compressed) {
// Convert the block + blk_offset pair into a raw offset into the decompressed stream
if (s->chunk.strm_len[CI_DATA] > 0) {
gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t);
}
if (s->chunk.strm_len[CI_DATA2] > 0) {
gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t);
}
__syncthreads();
}
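// Copy the rowgroup entries from shared memory to global memory, 4 threads per rowgroup,
// one 32-bit word per thread per iteration.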
rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t);
t4 = t & 3;
t32 = t >> 2;
for (int i = t32; i < num_rowgroups; i += 32) {
for (int j = t4; j < rowgroup_size4; j += 4) {
((uint32_t *)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] =
((volatile uint32_t *)&s->rowgroups[i])[j];
}
}
__syncthreads();
if (t == 0) { s->rowgroup_start += num_rowgroups; }
__syncthreads();
}
}
void __host__ ParseCompressedStripeData(CompressedStreamInfo *strm_info,
int32_t num_streams,
uint32_t compression_block_size,
uint32_t log2maxcr,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
gpuParseCompressedStripeData<<<dim_grid, dim_block, 0, stream.value()>>>(
strm_info, num_streams, compression_block_size, log2maxcr);
}
void __host__ PostDecompressionReassemble(CompressedStreamInfo *strm_info,
int32_t num_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
gpuPostDecompressionReassemble<<<dim_grid, dim_block, 0, stream.value()>>>(strm_info,
num_streams);
}
/**
* @brief Launches kernel for constructing rowgroup from index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
* @param[in] stream CUDA stream to use, default 0
*/
void __host__ ParseRowGroupIndex(RowGroup *row_groups,
CompressedStreamInfo *strm_info,
ColumnDesc *chunks,
uint32_t num_columns,
uint32_t num_stripes,
uint32_t num_rowgroups,
uint32_t rowidx_stride,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block
gpuParseRowGroupIndex<<<dim_grid, dim_block, 0, stream.value()>>>(
row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
be75d7877561c3abff9503f11cc17d7d3b082531.hip
|
// !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
be75d7877561c3abff9503f11cc17d7d3b082531.cu
|
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
e5619fd20495620d7956165e2391d91f9c87848f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <assert.h>
#include <iostream>
#include <random>
#include <rocblas.h>
#include "linear_layer.hh"
#include "nn_exception.hh"
#define LEARNING_RATE 0.01
__global__ void ReluActivationForward(float* Z, float* A,float* Stored_Z, int Z_x_dim, int Z_y_dim) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < Z_x_dim * Z_y_dim) {
A[index] = fmaxf(Z[index], 0);
Stored_Z[index] = A[index];
}
}
__global__ void ReluActivationBackprop(float* Z, float* dA, float* dZ, int Z_x_dim, int Z_y_dim) {
//int nnodes = 2708;
//int num_test_nodes = nnodes - (0.6*nnodes);
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < Z_x_dim * Z_y_dim) {
if (Z[index] > 0) {
dZ[index] = dA[index];
}
else {
dZ[index] = 0;
}
//Added to quickly check whether the node-aggregation output can be zeroed for test nodes
/*
if(index < num_test_nodes) {
dZ[index] = 0;
}*/
}
/*
if((row > 2700)) {
printf("ReLU x = %d, y = %d, dZ = %f, dA = %f\n", row, i, dZ[i + dA_y_dim * row], dA[i + dA_y_dim * row]);
}*/
}
__global__ void linearLayerForward( float* W, float* A, float* Z, float* b,
int W_x_dim, int W_y_dim,
int A_x_dim, int A_y_dim){
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
int Z_x_dim = A_x_dim;
int Z_y_dim = W_x_dim;
float Z_value = 0;
if( row < Z_x_dim && col < Z_y_dim){
for(int i=0; i< W_y_dim; i=i+1){
Z_value += W[i + W_y_dim * col] * A[i + A_y_dim * row];
}
Z[row * Z_y_dim + col] = Z_value + b[col];
// if(Z[row * Z_y_dim + col] > 0)
// printf("Z[%d]: %f\n", row * Z_y_dim + col, Z[row * Z_y_dim + col]);
}
}
__global__ void linearLayerForwardAddBias( float* Z, float* bias, int numFeatures) {
// APARNA TODO: fuse bias addition and reLU application
// APARNA TODO: if this takes a lot of time -- can merge computations for some features like fuseGNN
//Add Z: #nodes * #labels , b: labels * 1 (or 1 * labels) doesn't matter
//APARNA TODO: maybe doing an inner loop where we process > 1 node per CTA will help -- will reduce launch overhead
/*
for(int feature = threadIdx.x ; feature < numFeatures; feature += blockDim.x) {
Z[blockIdx.x * numFeatures + feature] = Z[blockIdx.x * numFeatures + feature] + bias[feature];
}*/
Z[blockIdx.x * numFeatures + threadIdx.x] = Z[blockIdx.x * numFeatures + threadIdx.x] + bias[threadIdx.x];
}
__global__ void linearLayerBackprop( float* W, float* dZ, float*dA,
int W_x_dim, int W_y_dim,
int dZ_x_dim, int dZ_y_dim){
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
int dA_x_dim = dZ_x_dim;
int dA_y_dim = W_y_dim;
float dA_value = 0.0f;
if (row < dA_x_dim && col < dA_y_dim) {
for (int i = 0; i < W_x_dim; i++) {
dA_value += -1 * W[i * W_y_dim + col] * dZ[ i + dZ_y_dim * row];
}
dA[row * dA_y_dim + col] = dA_value;
}
}
__global__ void linearLayerUpdateWeights( float* W, float* dW,
int W_x_dim, int W_y_dim,
float learning_rate) {
//W = W - (n) * dW
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x < W_x_dim && y < W_y_dim) {
W[x * W_y_dim + y] += (-1) * (learning_rate) * dW[x * W_y_dim + y];
}
}
/*
//Reduces mxn array into 1xm array
__global__ void reduce_array(volatile scalar_t* sdata, unsigned int tid, unsigned int reduce_len, unsigned int f_dim){
__shared__ scalar_t s_feature[blockSize];
while (reduce_len > 1){
__syncthreads();
// add the remainer
if ((tid < f_dim) && (reduce_len % 2 == 1)){
sdata[tid] += sdata[tid + f_dim * (reduce_len - 1)];
}
reduce_len /= 2;
if (tid < f_dim * reduce_len){
sdata[tid] += sdata[tid + f_dim * reduce_len];
}
}
}
*/
__global__ void linearLayerUpdateBias( float* dZ, float* b,
int dZ_x_dim, int dZ_y_dim,
int b_x_dim,
float learning_rate) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dZ_x_dim * dZ_y_dim) {
int dZ_x = index / dZ_y_dim; // row (node) index
int dZ_y = index % dZ_y_dim; // column (output feature) index, matching the bias layout
atomicAdd(&b[dZ_y], - learning_rate * (dZ[dZ_x * dZ_y_dim + dZ_y] / dZ_y_dim));
}
}
//__global__ void linearLayerUpdateBias( float* dZ, float* b,
// int dZ_x_dim, int dZ_y_dim,
// int b_x_dim,
// float learning_rate) {
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// int tid = threadIdx.x;
//
// //Setting to a high value
// extern __shared__ float buffer[];
//
// //Assuming #of output features > # of threads
// if(tid < dZ_y_dim) {
// buffer[tid] = 0;
// }
// __syncthreads();
//
// if (index < dZ_x_dim * dZ_y_dim) {
// int dZ_x = index / dZ_y_dim;
// int dZ_y = index % dZ_y_dim;
// atomicAdd(&buffer[dZ_y], dZ[dZ_x * dZ_y_dim + dZ_y]);
// }
//
// __syncthreads();
//
// if(tid < dZ_y_dim) {
// atomicAdd(&b[tid], -learning_rate*buffer[tid]/dZ_y_dim);
// }
//}
void LinearLayer::runGEMM(Matrix& A, Matrix& B, Matrix& C, bool transposeA, bool transposeB) {
//The transpose flags are for back propagation --> we multiply A.B' (or A'.B) instead of A.B when the corresponding flag is set
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
// Do the actual multiplication
//alpha * op(A) * op(B) + beta * OP(C)
// C(m,n) = A(m,k) * B(k,n)
int m = C.shape.x;
int n = C.shape.y;
int k = transposeA ? B.shape.x : A.shape.y;
//int lda=k,ldb=n,ldc=n;
int lda=transposeA ? m : k; //mxk
//int ldb= n; //transposeB ? n : k; // kxn
int ldb= transposeB ? k : n; // kxn
int ldc=n; //mxn
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
//Note: This function can't support the case when both transposeA and B are set to 1
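// cuBLAS/hipBLAS assumes column-major storage; swapping the operand order (and m/n) below computes
// op(B)*op(A) in column-major terms, which is exactly the row-major op(A)*op(B) stored in C.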
hipblasSgemm(handle,
transposeB ? HIPBLAS_OP_T : HIPBLAS_OP_N,
transposeA ? HIPBLAS_OP_T : HIPBLAS_OP_N,
n, m, k, alpha, B.data_device, ldb, A.data_device, lda, beta, C.data_device, ldc);
//print_kernel<<<1,1>>>(Z.data_device);
// Destroy the handle
hipblasDestroy(handle);
}
LinearLayer::LinearLayer(std::string name,int layer_num, Shape W_shape):
W(W_shape),b(W_shape.y,1),dW(W_shape)
{
this->name = name;
this->layer_num = layer_num;
// std::cout << "updated layer name\n";
b.allocateCudaMemory();
// std::cout << "b allocated\n";
W.allocateMemory();
dW.allocateMemory();
// std::cout << "w allocated\n";
initializeBiasWithZeros();
// std::cout << "bias initialized\n";
initializeWeightsRandomly();
// std::cout << "weights initialized\n";
}
void LinearLayer::free_matrix(){
dW.freeMem();
}
LinearLayer::~LinearLayer()
{ };
void LinearLayer::initializeWeightsRandomly(){
std::default_random_engine generator;
std::normal_distribution<float> normal_distribution(0.0, 0.1);
// std::cout << "W.shape.x:" << W.shape.x <<"\n";
// std::cout << "W.shape.y:" << W.shape.y <<"\n";
for(int x = 0; x < W.shape.x; x++){
for(int y = 0 ; y < W.shape.y; y++){
W[x * W.shape.y + y] = normal_distribution(generator)*0.1;
//printf("W[%d] = %f\n", (x * W.shape.y + y), W[x * W.shape.y + y]);
}
}
// std::cout << "copying data from host to device\n";
W.copyHostToDevice();
free(W.data_host);
}
void LinearLayer::initializeBiasWithZeros() {
//for (int x = 0; x < b.shape.x; x++) {
// b[x] = 0;
//}
//b.copyHostToDevice();
hipMemset(b.data_device, 0, b.shape.x * b.shape.y* sizeof(float));
}
__global__ void print_kernel_lin(float *A, int size, const char* str) {
for(int i=0; i<size; i++) {
if(A[i] != 0.0) {
printf("The value of %s[%d] = %f\n", str, i, A[i]);
}
}
}
Matrix& LinearLayer::forward(Matrix& A, bool training, bool freeMatrix){
// std::cout << " Linear forward A.x:" << A.shape.x << "\n";
// std::cout << " Linear forward A.y:" << A.shape.y << "\n";
// std::cout << " Linear forward W.x:" << W.shape.x << "\n";
// std::cout << " Linear forward W.y:" << W.shape.y << "\n";
// std::cout << " Linear forward A address:" << A.data_device << "\n";
assert(W.shape.x == A.shape.y);
// std::cout << "Linear layer forward\n";
//std::cout<< "Linear Layer ptr:" << A.data_device << "\n";
this->A = A;
//std::cout<< "Linear Layer ptr:" << A.data_device << "\n";
Shape Z_shape(A.shape.x,W.shape.y);
Z.allocateCuda(Z_shape);
computeAndStoreLayerOutput(A);
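// Hidden layers (layer_num != 2) apply ReLU and cache the activations in stored_Z for backprop;
// the output layer (layer_num == 2) is left linear.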
if(layer_num != 2){
stored_Z.allocateCuda(Z.shape);
dim3 block_size(256);
dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( ReluActivationForward), dim3(num_of_blocks), dim3(block_size), 0, 0, Z.data_device, Z.data_device,stored_Z.data_device, Z.shape.x, Z.shape.y);
}
// std::cout << "Linear Layer forward\n";
NNException::throwIfDeviceErrorOccurred("Cannot perform Linear Layer forward propagation");
// std::cout << " Linear forward shape.x:" << Z.shape.x << "\n";
// std::cout << " Linear forward shape.y:" << Z.shape.y << "\n";
// std::cout << " Linear forward A shape.x:" << A.shape.x << "\n";
// std::cout << " Linear forward A shape.y:" << A.shape.y << "\n";
// std::cout << " Linear forward A address:" << A.data_device << "\n";
if(training == false) {
if(freeMatrix) {
A.freeMem();
}
}
return Z;
}
__global__ void print_weight_sum(float *W, float *dW, int size) {
float w_sum = 0;
float dw_sum = 0;
float w_sum_mod = 0;
float dw_sum_mod = 0;
for(int i=0; i<size; i++) {
w_sum += W[i];
dw_sum += dW[i];
w_sum_mod += (W[i] > 0) ? W[i] : -W[i];
dw_sum_mod += (dW[i] > 0) ? dW[i] : -dW[i];
}
printf("The value of Weight Sum = %f, dW sum = %f\n", w_sum, dw_sum);
printf("The value of MOD Weight Sum = %f, dW sum = %f\n", w_sum_mod, dw_sum_mod);
}
void LinearLayer::computeAndStoreLayerOutput(Matrix& A) {
runGEMM(A, W, Z, false, false);
//Num CTAs = #nodes, #threads = min(256, numFeatures)
int threadsPerBlock = ::min(256, (int) W.shape.y);
hipLaunchKernelGGL(( linearLayerForwardAddBias), dim3((Z.shape.x + threadsPerBlock - 1)/threadsPerBlock), dim3(threadsPerBlock), 0, 0, Z.data_device, b.data_device, Z.shape.y);
}
Matrix& LinearLayer::backprop(Matrix& dZ, float learning_rate, bool freeMatrix) {
// std::cout << "Linear layer backward\n";
if(layer_num != 2){
dim3 block_size(256);
dim3 num_of_blocks((stored_Z.shape.y * stored_Z.shape.x + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( ReluActivationBackprop), dim3(num_of_blocks), dim3(block_size), 0, 0, stored_Z.data_device, dZ.data_device,dZ.data_device, stored_Z.shape.x, stored_Z.shape.y);
NNException::throwIfDeviceErrorOccurred("Cannot perform ReLU back propagation");
}
dA.allocateCuda(A.shape);
//dW.allocateCuda(W.shape); //A'.dZ
// std::cout << "Linear Layer backward\n";
//print_kernel_lin<<<1,1>>>(dZ.data_device, dZ.shape.x*dZ.shape.y, "dZ - pre backprop ");
computeAndStoreBackpropError(dZ);
NNException::throwIfDeviceErrorOccurred("Cannot perform back propagation.");
/*
if(dZ.shape.y == 32) {
printf("Printing dZ of lin 1 layer\n");
print_kernel_lin<<<1,1>>>(dZ.data_device, dZ.shape.x*dZ.shape.y, "dZ - in backprop ");
}*/
updateBias(dZ, learning_rate);
NNException::throwIfDeviceErrorOccurred("Cannot perform bias update.");
//std::cout << " A ptr: " << A.data_device << "\n";
//std::cout << " A last :" << A.data_device + (A.shape.x * A.shape.y * 4) << "\n";
//std::cout << " dZ ptr: " << dZ.data_device << "\n";
//std::cout << " dZ last :" << dZ.data_device + (dZ.shape.x * dZ.shape.y * 4) << "\n";
//std::cout << " Linear backward shape dZ.x:" << dZ.shape.x << "\n";
//std::cout << " Linear backward shape dZ.y:" << dZ.shape.y << "\n";
//std::cout << " Linear backward shape A.x:" << A.shape.x << "\n";
//std::cout << " Linear backward shape A.y:" << A.shape.y << "\n";
updateWeights(dZ, learning_rate);
NNException::throwIfDeviceErrorOccurred("Cannot perform weights update.");
//std::cout << " Linear backward shape.x:" << dA.shape.x << "\n";
//std::cout << " Linear backward shape.y:" << dA.shape.y << "\n";
stored_Z.freeMem();
//dZ.freeMem();
//dW.freeMem();
if(A.device_allocated == true){
if(freeMatrix){
A.freeMem();
}
}
return dA;
}
void LinearLayer::computeAndStoreBackpropError(Matrix& dZ) {
//std::cout << "dZ.x = " << dZ.shape.x << ", dZ.y = " << dZ.shape.y << std::endl;
//std::cout << "dA.x = " << dA.shape.x << ", dA.y = " << dA.shape.y << std::endl;
//W: 10x7, dz: 2708x7, dA: 2708x10
// So dA = dz.W'
runGEMM(dZ, W, dA, false, true);
//TODO: need to multiply dA with -1. <<< Are we sure??? -- why not do that in dZ calculation?>>>
/*
if(dZ.shape.y == 7) {
printf("Printing dA\n");
print_kernel_lin<<<1,1>>>(dA.data_device, dA.shape.x*dA.shape.y, "dA ");
hipDeviceSynchronize();
}*/
}
void LinearLayer::updateWeights(Matrix& dZ, float learning_rate) {
//dW = A'.dZ
//dw: 10x7, A: 2708x10, dZ: 2708x7
runGEMM(A, dZ, dW, true, false);
//print_weight_sum<<<1,1>>>(W.data_device, dW.data_device, W.shape.x*W.shape.y);
//Weight size is 1433x16 and 16x7
//W = W - (n) * dW
dim3 block_size(16, 16);
dim3 num_of_blocks((W.shape.x + block_size.x - 1) / block_size.x,(W.shape.y + block_size.y - 1) / block_size.y);
hipLaunchKernelGGL(( linearLayerUpdateWeights), dim3(num_of_blocks), dim3(block_size), 0, 0, W.data_device,
dW.data_device,
W.shape.x, W.shape.y,
learning_rate);
}
void LinearLayer::updateBias(Matrix& dZ, float learning_rate) {
//db: 1x7
//The operation is dB = dZ.(reduce in Xdim) so 2708x7 --> 1x7
//Then b = b - (n) * dB
//Need to write a reduction kernel for the first line
//print_kernel_lin<<<1,1>>>(dZ.data_device, dZ.shape.x*dZ.shape.y, "dZ - pre bias ");
dim3 block_size(512);
dim3 num_of_blocks( (dZ.shape.y * dZ.shape.x + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( linearLayerUpdateBias), dim3(num_of_blocks), dim3(block_size), dZ.shape.y, 0, dZ.data_device,
b.data_device,
dZ.shape.x, dZ.shape.y,
b.shape.x, learning_rate);
//printf("Bias X: %d, Y: %d\n", b.shape.x, b.shape.y);
//print_kernel_lin<<<1,1>>>(b.data_device, b.shape.x*b.shape.y, "bias");
}
int LinearLayer::getXdim() const {
return W.shape.x;
}
int LinearLayer::getYdim() const {
return W.shape.y;
}
Matrix LinearLayer::getWeightsMatrix() const {
return W;
}
Matrix LinearLayer::getBiasVector() const {
return b;
}
|
e5619fd20495620d7956165e2391d91f9c87848f.cu
|
#include <stdlib.h>
#include <assert.h>
#include <iostream>
#include <random>
#include <cublas_v2.h>
#include "linear_layer.hh"
#include "nn_exception.hh"
#define LEARNING_RATE 0.01
__global__ void ReluActivationForward(float* Z, float* A,float* Stored_Z, int Z_x_dim, int Z_y_dim) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < Z_x_dim * Z_y_dim) {
A[index] = fmaxf(Z[index], 0);
Stored_Z[index] = A[index];
}
}
__global__ void ReluActivationBackprop(float* Z, float* dA, float* dZ, int Z_x_dim, int Z_y_dim) {
//int nnodes = 2708;
//int num_test_nodes = nnodes - (0.6*nnodes);
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < Z_x_dim * Z_y_dim) {
if (Z[index] > 0) {
dZ[index] = dA[index];
}
else {
dZ[index] = 0;
}
//Added to quickly check whether the node-aggregation output can be zeroed for test nodes
/*
if(index < num_test_nodes) {
dZ[index] = 0;
}*/
}
/*
if((row > 2700)) {
printf("ReLU x = %d, y = %d, dZ = %f, dA = %f\n", row, i, dZ[i + dA_y_dim * row], dA[i + dA_y_dim * row]);
}*/
}
__global__ void linearLayerForward( float* W, float* A, float* Z, float* b,
int W_x_dim, int W_y_dim,
int A_x_dim, int A_y_dim){
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
int Z_x_dim = A_x_dim;
int Z_y_dim = W_x_dim;
float Z_value = 0;
if( row < Z_x_dim && col < Z_y_dim){
for(int i=0; i< W_y_dim; i=i+1){
Z_value += W[i + W_y_dim * col] * A[i + A_y_dim * row];
}
Z[row * Z_y_dim + col] = Z_value + b[col];
// if(Z[row * Z_y_dim + col] > 0)
// printf("Z[%d]: %f\n", row * Z_y_dim + col, Z[row * Z_y_dim + col]);
}
}
__global__ void linearLayerForwardAddBias( float* Z, float* bias, int numFeatures) {
// APARNA TODO: fuse bias addition and reLU application
// APARNA TODO: if this takes a lot of time -- can merge computations for some features like fuseGNN
//Add Z: #nodes * #labels , b: labels * 1 (or 1 * labels) doesn't matter
//APARNA TODO: maybe doing an inner loop where we process > 1 node per CTA will help -- will reduce launch overhead
/*
for(int feature = threadIdx.x ; feature < numFeatures; feature += blockDim.x) {
Z[blockIdx.x * numFeatures + feature] = Z[blockIdx.x * numFeatures + feature] + bias[feature];
}*/
Z[blockIdx.x * numFeatures + threadIdx.x] = Z[blockIdx.x * numFeatures + threadIdx.x] + bias[threadIdx.x];
}
__global__ void linearLayerBackprop( float* W, float* dZ, float*dA,
int W_x_dim, int W_y_dim,
int dZ_x_dim, int dZ_y_dim){
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
int dA_x_dim = dZ_x_dim;
int dA_y_dim = W_y_dim;
float dA_value = 0.0f;
if (row < dA_x_dim && col < dA_y_dim) {
for (int i = 0; i < W_x_dim; i++) {
dA_value += -1 * W[i * W_y_dim + col] * dZ[ i + dZ_y_dim * row];
}
dA[row * dA_y_dim + col] = dA_value;
}
}
__global__ void linearLayerUpdateWeights( float* W, float* dW,
int W_x_dim, int W_y_dim,
float learning_rate) {
//W = W - (n) * dW
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x < W_x_dim && y < W_y_dim) {
W[x * W_y_dim + y] += (-1) * (learning_rate) * dW[x * W_y_dim + y];
}
}
/*
//Reduces mxn array into 1xm array
__global__ void reduce_array(volatile scalar_t* sdata, unsigned int tid, unsigned int reduce_len, unsigned int f_dim){
__shared__ scalar_t s_feature[blockSize];
while (reduce_len > 1){
__syncthreads();
// add the remainer
if ((tid < f_dim) && (reduce_len % 2 == 1)){
sdata[tid] += sdata[tid + f_dim * (reduce_len - 1)];
}
reduce_len /= 2;
if (tid < f_dim * reduce_len){
sdata[tid] += sdata[tid + f_dim * reduce_len];
}
}
}
*/
__global__ void linearLayerUpdateBias( float* dZ, float* b,
int dZ_x_dim, int dZ_y_dim,
int b_x_dim,
float learning_rate) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dZ_x_dim * dZ_y_dim) {
int dZ_x = index / dZ_y_dim; // row (node) index
int dZ_y = index % dZ_y_dim; // column (output feature) index, matching the bias layout
atomicAdd(&b[dZ_y], - learning_rate * (dZ[dZ_x * dZ_y_dim + dZ_y] / dZ_y_dim));
}
}
//__global__ void linearLayerUpdateBias( float* dZ, float* b,
// int dZ_x_dim, int dZ_y_dim,
// int b_x_dim,
// float learning_rate) {
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// int tid = threadIdx.x;
//
// //Setting to a high value
// extern __shared__ float buffer[];
//
// //Assuming #of output features > # of threads
// if(tid < dZ_y_dim) {
// buffer[tid] = 0;
// }
// __syncthreads();
//
// if (index < dZ_x_dim * dZ_y_dim) {
// int dZ_x = index / dZ_y_dim;
// int dZ_y = index % dZ_y_dim;
// atomicAdd(&buffer[dZ_y], dZ[dZ_x * dZ_y_dim + dZ_y]);
// }
//
// __syncthreads();
//
// if(tid < dZ_y_dim) {
// atomicAdd(&b[tid], -learning_rate*buffer[tid]/dZ_y_dim);
// }
//}
void LinearLayer::runGEMM(Matrix& A, Matrix& B, Matrix& C, bool transposeA, bool transposeB) {
//The transpose flags are for back propagation --> we multiply A.B' (or A'.B) instead of A.B when the corresponding flag is set
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
// Do the actual multiplication
//alpha * op(A) * op(B) + beta * OP(C)
// C(m,n) = A(m,k) * B(k,n)
int m = C.shape.x;
int n = C.shape.y;
int k = transposeA ? B.shape.x : A.shape.y;
//int lda=k,ldb=n,ldc=n;
int lda=transposeA ? m : k; //mxk
//int ldb= n; //transposeB ? n : k; // kxn
int ldb= transposeB ? k : n; // kxn
int ldc=n; //mxn
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
//Note: This function can't support the case when both transposeA and B are set to 1
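// cuBLAS assumes column-major storage; swapping the operand order (and m/n) below computes
// op(B)*op(A) in column-major terms, which is exactly the row-major op(A)*op(B) stored in C.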
cublasSgemm(handle,
transposeB ? CUBLAS_OP_T : CUBLAS_OP_N,
transposeA ? CUBLAS_OP_T : CUBLAS_OP_N,
n, m, k, alpha, B.data_device, ldb, A.data_device, lda, beta, C.data_device, ldc);
//print_kernel<<<1,1>>>(Z.data_device);
// Destroy the handle
cublasDestroy(handle);
}
LinearLayer::LinearLayer(std::string name,int layer_num, Shape W_shape):
W(W_shape),b(W_shape.y,1),dW(W_shape)
{
this->name = name;
this->layer_num = layer_num;
// std::cout << "updated layer name\n";
b.allocateCudaMemory();
// std::cout << "b allocated\n";
W.allocateMemory();
dW.allocateMemory();
// std::cout << "w allocated\n";
initializeBiasWithZeros();
// std::cout << "bias initialized\n";
initializeWeightsRandomly();
// std::cout << "weights initialized\n";
}
void LinearLayer::free_matrix(){
dW.freeMem();
}
LinearLayer::~LinearLayer()
{ };
void LinearLayer::initializeWeightsRandomly(){
std::default_random_engine generator;
std::normal_distribution<float> normal_distribution(0.0, 0.1);
// std::cout << "W.shape.x:" << W.shape.x <<"\n";
// std::cout << "W.shape.y:" << W.shape.y <<"\n";
for(int x = 0; x < W.shape.x; x++){
for(int y = 0 ; y < W.shape.y; y++){
W[x * W.shape.y + y] = normal_distribution(generator)*0.1;
//printf("W[%d] = %f\n", (x * W.shape.y + y), W[x * W.shape.y + y]);
}
}
// std::cout << "copying data from host to device\n";
W.copyHostToDevice();
free(W.data_host);
}
void LinearLayer::initializeBiasWithZeros() {
//for (int x = 0; x < b.shape.x; x++) {
// b[x] = 0;
//}
//b.copyHostToDevice();
cudaMemset(b.data_device, 0, b.shape.x * b.shape.y* sizeof(float));
}
__global__ void print_kernel_lin(float *A, int size, const char* str) {
for(int i=0; i<size; i++) {
if(A[i] != 0.0) {
printf("The value of %s[%d] = %f\n", str, i, A[i]);
}
}
}
Matrix& LinearLayer::forward(Matrix& A, bool training, bool freeMatrix){
// std::cout << " Linear forward A.x:" << A.shape.x << "\n";
// std::cout << " Linear forward A.y:" << A.shape.y << "\n";
// std::cout << " Linear forward W.x:" << W.shape.x << "\n";
// std::cout << " Linear forward W.y:" << W.shape.y << "\n";
// std::cout << " Linear forward A address:" << A.data_device << "\n";
assert(W.shape.x == A.shape.y);
// std::cout << "Linear layer forward\n";
//std::cout<< "Linear Layer ptr:" << A.data_device << "\n";
this->A = A;
//std::cout<< "Linear Layer ptr:" << A.data_device << "\n";
Shape Z_shape(A.shape.x,W.shape.y);
Z.allocateCuda(Z_shape);
computeAndStoreLayerOutput(A);
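// Hidden layers (layer_num != 2) apply ReLU and cache the activations in stored_Z for backprop;
// the output layer (layer_num == 2) is left linear.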
if(layer_num != 2){
stored_Z.allocateCuda(Z.shape);
dim3 block_size(256);
dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x);
ReluActivationForward<<<num_of_blocks, block_size>>>(Z.data_device, Z.data_device,stored_Z.data_device, Z.shape.x, Z.shape.y);
}
// std::cout << "Linear Layer forward\n";
NNException::throwIfDeviceErrorOccurred("Cannot perform Linear Layer forward propagation");
// std::cout << " Linear forward shape.x:" << Z.shape.x << "\n";
// std::cout << " Linear forward shape.y:" << Z.shape.y << "\n";
// std::cout << " Linear forward A shape.x:" << A.shape.x << "\n";
// std::cout << " Linear forward A shape.y:" << A.shape.y << "\n";
// std::cout << " Linear forward A address:" << A.data_device << "\n";
if(training == false) {
if(freeMatrix) {
A.freeMem();
}
}
return Z;
}
__global__ void print_weight_sum(float *W, float *dW, int size) {
float w_sum = 0;
float dw_sum = 0;
float w_sum_mod = 0;
float dw_sum_mod = 0;
for(int i=0; i<size; i++) {
w_sum += W[i];
dw_sum += dW[i];
w_sum_mod += (W[i] > 0) ? W[i] : -W[i];
dw_sum_mod += (dW[i] > 0) ? dW[i] : -dW[i];
}
printf("The value of Weight Sum = %f, dW sum = %f\n", w_sum, dw_sum);
printf("The value of MOD Weight Sum = %f, dW sum = %f\n", w_sum_mod, dw_sum_mod);
}
void LinearLayer::computeAndStoreLayerOutput(Matrix& A) {
runGEMM(A, W, Z, false, false);
//Num CTAs = #nodes, #threads = min(256, numFeatures)
int threadsPerBlock = std::min(256, (int) W.shape.y);
linearLayerForwardAddBias<<<(Z.shape.x + threadsPerBlock - 1)/threadsPerBlock, threadsPerBlock>>>(Z.data_device, b.data_device, Z.shape.y);
}
Matrix& LinearLayer::backprop(Matrix& dZ, float learning_rate, bool freeMatrix) {
// std::cout << "Linear layer backward\n";
if(layer_num != 2){
dim3 block_size(256);
dim3 num_of_blocks((stored_Z.shape.y * stored_Z.shape.x + block_size.x - 1) / block_size.x);
ReluActivationBackprop<<<num_of_blocks, block_size>>>(stored_Z.data_device, dZ.data_device,dZ.data_device, stored_Z.shape.x, stored_Z.shape.y);
NNException::throwIfDeviceErrorOccurred("Cannot perform ReLU back propagation");
}
dA.allocateCuda(A.shape);
//dW.allocateCuda(W.shape); //A'.dZ
// std::cout << "Linear Layer backward\n";
//print_kernel_lin<<<1,1>>>(dZ.data_device, dZ.shape.x*dZ.shape.y, "dZ - pre backprop ");
computeAndStoreBackpropError(dZ);
NNException::throwIfDeviceErrorOccurred("Cannot perform back propagation.");
/*
if(dZ.shape.y == 32) {
printf("Printing dZ of lin 1 layer\n");
print_kernel_lin<<<1,1>>>(dZ.data_device, dZ.shape.x*dZ.shape.y, "dZ - in backprop ");
}*/
updateBias(dZ, learning_rate);
NNException::throwIfDeviceErrorOccurred("Cannot perform bias update.");
//std::cout << " A ptr: " << A.data_device << "\n";
//std::cout << " A last :" << A.data_device + (A.shape.x * A.shape.y * 4) << "\n";
//std::cout << " dZ ptr: " << dZ.data_device << "\n";
//std::cout << " dZ last :" << dZ.data_device + (dZ.shape.x * dZ.shape.y * 4) << "\n";
//std::cout << " Linear backward shape dZ.x:" << dZ.shape.x << "\n";
//std::cout << " Linear backward shape dZ.y:" << dZ.shape.y << "\n";
//std::cout << " Linear backward shape A.x:" << A.shape.x << "\n";
//std::cout << " Linear backward shape A.y:" << A.shape.y << "\n";
updateWeights(dZ, learning_rate);
NNException::throwIfDeviceErrorOccurred("Cannot perform weights update.");
//std::cout << " Linear backward shape.x:" << dA.shape.x << "\n";
//std::cout << " Linear backward shape.y:" << dA.shape.y << "\n";
stored_Z.freeMem();
//dZ.freeMem();
//dW.freeMem();
if(A.device_allocated == true){
if(freeMatrix){
A.freeMem();
}
}
return dA;
}
void LinearLayer::computeAndStoreBackpropError(Matrix& dZ) {
//std::cout << "dZ.x = " << dZ.shape.x << ", dZ.y = " << dZ.shape.y << std::endl;
//std::cout << "dA.x = " << dA.shape.x << ", dA.y = " << dA.shape.y << std::endl;
//W: 10x7, dz: 2708x7, dA: 2708x10
// So dA = dz.W'
runGEMM(dZ, W, dA, false, true);
//TODO: need to multiply dA with -1. <<< Are we sure??? -- why not do that in dZ calculation?>>>
/*
if(dZ.shape.y == 7) {
printf("Printing dA\n");
print_kernel_lin<<<1,1>>>(dA.data_device, dA.shape.x*dA.shape.y, "dA ");
cudaDeviceSynchronize();
}*/
}
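// Illustrative sketch only: runGEMM is defined elsewhere in this project, so the naive
// kernel below is an assumption of what runGEMM(dZ, W, dA, false, true) computes,
// i.e. dA = dZ . W^T, assuming row-major storage. The name and layout are hypothetical.
__global__ void exampleGemmABt(const float* A, const float* B, float* C,
int M, int N, int K) {
// A is M x K, B is N x K (so B^T is K x N), C is M x N, all row-major.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < M && col < N) {
float acc = 0.0f;
for (int k = 0; k < K; ++k)
acc += A[row * K + k] * B[col * K + k]; // B is indexed as its transpose
C[row * N + col] = acc;
}
}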
void LinearLayer::updateWeights(Matrix& dZ, float learning_rate) {
//dW = A'.dZ
//dw: 10x7, A: 2708x10, dZ: 2708x7
runGEMM(A, dZ, dW, true, false);
//print_weight_sum<<<1,1>>>(W.data_device, dW.data_device, W.shape.x*W.shape.y);
//Weight size is 1433x16 and 16x7
//W = W - (n) * dW
dim3 block_size(16, 16);
dim3 num_of_blocks((W.shape.x + block_size.x - 1) / block_size.x,(W.shape.y + block_size.y - 1) / block_size.y);
linearLayerUpdateWeights<<<num_of_blocks, block_size>>>(W.data_device,
dW.data_device,
W.shape.x, W.shape.y,
learning_rate);
}
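// Illustrative sketch (assumption, not the project's linearLayerUpdateWeights): a plain
// element-wise SGD step W -= learning_rate * dW over a 2D grid; indexing is hypothetical.
__global__ void exampleSgdUpdateKernel(float* W, const float* dW,
int rows, int cols, float learning_rate) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < rows && y < cols) {
int idx = x * cols + y; // row-major element index
W[idx] -= learning_rate * dW[idx]; // gradient descent step
}
}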
void LinearLayer::updateBias(Matrix& dZ, float learning_rate) {
//db: 1x7
//The operation is dB = dZ.(reduce in Xdim) so 2708x7 --> 1x7
//Then b = b - (n) * dB
//Need to write a reduction kernel for the first line
//print_kernel_lin<<<1,1>>>(dZ.data_device, dZ.shape.x*dZ.shape.y, "dZ - pre bias ");
dim3 block_size(512);
dim3 num_of_blocks( (dZ.shape.y * dZ.shape.x + block_size.x - 1) / block_size.x);
linearLayerUpdateBias<<<num_of_blocks, block_size, dZ.shape.y>>>(dZ.data_device,
b.data_device,
dZ.shape.x, dZ.shape.y,
b.shape.x, learning_rate);
//printf("Bias X: %d, Y: %d\n", b.shape.x, b.shape.y);
//print_kernel_lin<<<1,1>>>(b.data_device, b.shape.x*b.shape.y, "bias");
}
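// Illustrative sketch (assumption, not the project's linearLayerUpdateBias): a naive
// column-sum reduction computing dB[j] = sum_i dZ[i][j] followed by the SGD step
// b[j] -= lr * dB[j]. One thread per output column is adequate for small column counts.
__global__ void exampleBiasGradKernel(const float* dZ, float* b,
int rows, int cols, float lr) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < cols) {
float db = 0.0f;
for (int i = 0; i < rows; ++i)
db += dZ[i * cols + j]; // accumulate gradient over the batch dimension
b[j] -= lr * db; // gradient descent update
}
}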
int LinearLayer::getXdim() const {
return W.shape.x;
}
int LinearLayer::getYdim() const {
return W.shape.y;
}
Matrix LinearLayer::getWeightsMatrix() const {
return W;
}
Matrix LinearLayer::getBiasVector() const {
return b;
}
|
2ddc98c988137758f60668f3861356831bbdee1c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/Exceptions.h>
#include <THH/THHTensorMathReduce.cuh>
#include <math.h>
#include <ATen/native/Distance.h>
namespace at { namespace native {
namespace {
static const int forward_threads = 256;
#ifdef __HIP_PLATFORM_HCC__
static const int WARP_SIZE = 64;
#else
static const int WARP_SIZE = 32;
#endif
template <typename scalar_t>
static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
template <>
__forceinline__ __device__ float device_sqrt(float val) {
return ::sqrtf(val);
}
template <>
__forceinline__ __device__ double device_sqrt(double val) {
return ::sqrt(val);
}
template <typename scalar_t>
struct dists {
static __forceinline__ __device__ scalar_t sign(scalar_t val) {
return (0 < val) - (val < 0);
}
// Zero norm
struct zero {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff != 0.0; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
};
// One norm
struct one {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff); }
};
// Special case backward when p is less than two
struct lt_two {
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : sign(diff) * ::pow(std::abs(diff), p - 1) * grad / ::pow(dist, p - 1); }
};
// Two norm
struct two {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff * diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return device_sqrt<scalar_t>(agg); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : grad * diff / dist; }
};
// General p norm
struct p {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += ::pow(diff, p); }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return ::pow(agg, static_cast<scalar_t>(1) / p); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : diff * ::pow(std::abs(diff), p - 2) * grad / ::pow(dist, p - 1); }
};
// Inf norm
struct inf {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { if (diff > agg) { agg = diff; } }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { if (other > update) { update = other; } }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff) * (std::abs(diff) == dist); }
};
};
template <typename scalar_t, typename F>
__device__ static inline scalar_t reduce_agg(scalar_t agg) {
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
__shared__ scalar_t shared[forward_threads];
int lane = threadIdx.x % warpSize;
int warp_id = threadIdx.x / warpSize;
if (lane == 0) {
shared[warp_id] = agg;
}
__syncthreads();
agg = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0.0;
if (warp_id == 0) {
for (int offset = blockDim.x / warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
}
return agg;
}
template <typename scalar_t, typename F>
__global__ static void pdist_kernel_cuda_impl(scalar_t * result, const scalar_t * self, const int64_t n, const int64_t m, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.x;
const int stride = blockDim.x;
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
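// Note on the index math above (added): pairs (i, j) with i < j are enumerated so that
// k = i*n - i*(i+1)/2 + (j - i - 1). Solving i*n - i*(i+1)/2 = k for i gives
// i = n2 - sqrt(n2*n2 - 2*k) with n2 = n - 0.5; the extra -1 under the square root
// (n2_squared_minus_1) biases the value up slightly so floating-point rounding cannot
// drop the truncated result below the correct row, and j follows from the first identity.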
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = self + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
agg = reduce_agg<scalar_t, F>(agg);
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
template <typename scalar_t, typename F>
__global__ static void cdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * x1, const scalar_t * x2, const scalar_t * dist, int64_t gs,
const scalar_t p, const int64_t r1, const int64_t r2, const int64_t m, const int64_t count) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= count) {
return;
}
int64_t i = k / r2;
int64_t j = k % r2;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = x1 + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = x2 + j * m + init;
scalar_t * buff_i = buffer + (r1 * j + i) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
}
}
template <typename scalar_t, typename F>
__global__ static void pdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * self, const scalar_t * dist, int64_t gs, const int64_t n, const int64_t m, const int64_t combs, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= combs) {
return;
}
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
int64_t ib = j - i - 1;
int64_t jb = n - 2 - i;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = self + j * m + init;
scalar_t * buff_i = buffer + (ib * n + i) * m + init;
scalar_t * buff_j = buffer + (jb * n + j) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride, buff_j += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
*buff_j = -res;
}
}
template <typename scalar_t, typename F>
__global__ static void cdist_kernel_cuda_impl(scalar_t * result, const scalar_t * x1, const scalar_t * x2, const scalar_t p, const int64_t r1, const int64_t r2, const int64_t m) {
const int k = blockIdx.x;
const int64_t i = k / r2;
const int64_t j = k % r2;
const int stride = blockDim.x;
const scalar_t * const start = x1 + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = x2 + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
agg = reduce_agg<scalar_t, F>(agg);
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
void cdist_kernel_impl(Tensor& result, const Tensor& x1, const Tensor& x2, double p) {
int64_t r1 = x1.size(-2);
int64_t r2 = x2.size(-2);
int64_t m = x1.size(-1);
const dim3 grid(r1*r2);
const dim3 block(::min((int64_t)forward_threads, ((m - 1) / WARP_SIZE + 1) * WARP_SIZE));
AT_DISPATCH_FLOATING_TYPES(x1.scalar_type(), "cdist_cuda", [&] {
if (p == 0.0) {
hipLaunchKernelGGL(( cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (p == 1.0) {
hipLaunchKernelGGL(( cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (p == 2.0) {
hipLaunchKernelGGL(( cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else {
hipLaunchKernelGGL(( cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
}
});
AT_CUDA_CHECK(hipGetLastError());
}
void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, double p) {
const dim3 grid(result.numel());
const dim3 block(forward_threads);
int64_t n = self.size(0);
int64_t m = self.size(1);
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "pdist_cuda", [&] {
if (p == 0.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 1.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(hipGetLastError());
}
void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || self.numel() == 0) {
result.fill_(0);
return;
}
const int64_t n = result.size(0);
int64_t m = self.size(1);
const int block_x = 64;
// NB: be careful with changing block_y; as it's currently written, grid_y is limited to be 2^16.
// From binary search, block_y of 16 gives us max pdist dim0 of 1449,
// block_y of 4 gives us max pdist dim0 of 725.
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
Tensor buffer = at::empty({n - 1, result.size(0), result.size(1)}, result.options());
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "pdist_cuda_backward", [&] {
if (p == 1.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p < 2.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(hipGetLastError());
at::sum_out(result, buffer, 0);
}
void cdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& x1, const Tensor& x2, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || x1.numel() == 0 || x2.numel() == 0) {
result.fill_(0);
return;
}
const int64_t r1 = x1.size(-2);
const int64_t r2 = x2.size(-2);
const int64_t m = x1.size(-1);
const int block_x = 64;
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
const int64_t count = dist.numel();
Tensor buffer = at::empty({r2, r1, m}, result.options());
AT_DISPATCH_FLOATING_TYPES(result.scalar_type(), "cdist_cuda_backward", [&] {
if (p == 1.0) {
hipLaunchKernelGGL(( cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (p < 2.0) {
hipLaunchKernelGGL(( cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (p == 2.0) {
hipLaunchKernelGGL(( cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else {
hipLaunchKernelGGL(( cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
}
});
AT_CUDA_CHECK(hipGetLastError());
at::sum_out(result, buffer, 0);
}
} // anonymous namespace
REGISTER_DISPATCH(pdist_forward_stub, &pdist_forward_kernel_impl);
REGISTER_DISPATCH(pdist_backward_stub, &pdist_backward_kernel_impl);
REGISTER_DISPATCH(cdist_stub, &cdist_kernel_impl);
REGISTER_DISPATCH(cdist_backward_stub, &cdist_backward_kernel_impl);
}} // at::native
|
2ddc98c988137758f60668f3861356831bbdee1c.cu
|
#include <ATen/ATen.h>
#include <ATen/cuda/Exceptions.h>
#include <THC/THCTensorMathReduce.cuh>
#include <math.h>
#include <ATen/native/Distance.h>
namespace at { namespace native {
namespace {
static const int forward_threads = 256;
#ifdef __HIP_PLATFORM_HCC__
static const int WARP_SIZE = 64;
#else
static const int WARP_SIZE = 32;
#endif
template <typename scalar_t>
static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
template <>
__forceinline__ __device__ float device_sqrt(float val) {
return ::sqrtf(val);
}
template <>
__forceinline__ __device__ double device_sqrt(double val) {
return ::sqrt(val);
}
template <typename scalar_t>
struct dists {
static __forceinline__ __device__ scalar_t sign(scalar_t val) {
return (0 < val) - (val < 0);
}
// Zero norm
struct zero {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff != 0.0; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
};
// One norm
struct one {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff); }
};
// Special case backward when p is less than two
struct lt_two {
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : sign(diff) * std::pow(std::abs(diff), p - 1) * grad / std::pow(dist, p - 1); }
};
// Two norm
struct two {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff * diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return device_sqrt<scalar_t>(agg); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : grad * diff / dist; }
};
// General p norm
struct p {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += std::pow(diff, p); }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return std::pow(agg, static_cast<scalar_t>(1) / p); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : diff * std::pow(std::abs(diff), p - 2) * grad / std::pow(dist, p - 1); }
};
// Inf norm
struct inf {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { if (diff > agg) { agg = diff; } }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { if (other > update) { update = other; } }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff) * (std::abs(diff) == dist); }
};
};
template <typename scalar_t, typename F>
__device__ static inline scalar_t reduce_agg(scalar_t agg) {
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
__shared__ scalar_t shared[forward_threads];
int lane = threadIdx.x % warpSize;
int warp_id = threadIdx.x / warpSize;
if (lane == 0) {
shared[warp_id] = agg;
}
__syncthreads();
agg = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0.0;
if (warp_id == 0) {
for (int offset = blockDim.x / warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
}
return agg;
}
template <typename scalar_t, typename F>
__global__ static void pdist_kernel_cuda_impl(scalar_t * result, const scalar_t * self, const int64_t n, const int64_t m, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.x;
const int stride = blockDim.x;
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = self + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
agg = reduce_agg<scalar_t, F>(agg);
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
template <typename scalar_t, typename F>
__global__ static void cdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * x1, const scalar_t * x2, const scalar_t * dist, int64_t gs,
const scalar_t p, const int64_t r1, const int64_t r2, const int64_t m, const int64_t count) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= count) {
return;
}
int64_t i = k / r2;
int64_t j = k % r2;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = x1 + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = x2 + j * m + init;
scalar_t * buff_i = buffer + (r1 * j + i) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
}
}
template <typename scalar_t, typename F>
__global__ static void pdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * self, const scalar_t * dist, int64_t gs, const int64_t n, const int64_t m, const int64_t combs, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= combs) {
return;
}
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
int64_t ib = j - i - 1;
int64_t jb = n - 2 - i;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = self + j * m + init;
scalar_t * buff_i = buffer + (ib * n + i) * m + init;
scalar_t * buff_j = buffer + (jb * n + j) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride, buff_j += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
*buff_j = -res;
}
}
template <typename scalar_t, typename F>
__global__ static void cdist_kernel_cuda_impl(scalar_t * result, const scalar_t * x1, const scalar_t * x2, const scalar_t p, const int64_t r1, const int64_t r2, const int64_t m) {
const int k = blockIdx.x;
const int64_t i = k / r2;
const int64_t j = k % r2;
const int stride = blockDim.x;
const scalar_t * const start = x1 + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = x2 + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
agg = reduce_agg<scalar_t, F>(agg);
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
void cdist_kernel_impl(Tensor& result, const Tensor& x1, const Tensor& x2, double p) {
int64_t r1 = x1.size(-2);
int64_t r2 = x2.size(-2);
int64_t m = x1.size(-1);
const dim3 grid(r1*r2);
const dim3 block(std::min((int64_t)forward_threads, ((m - 1) / WARP_SIZE + 1) * WARP_SIZE));
AT_DISPATCH_FLOATING_TYPES(x1.scalar_type(), "cdist_cuda", [&] {
if (p == 0.0) {
cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero><<<grid, block>>>(result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (p == 1.0) {
cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (p == 2.0) {
cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (std::isinf(p)) {
cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else {
cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
}
});
AT_CUDA_CHECK(cudaGetLastError());
}
void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, double p) {
const dim3 grid(result.numel());
const dim3 block(forward_threads);
int64_t n = self.size(0);
int64_t m = self.size(1);
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "pdist_cuda", [&] {
if (p == 0.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 1.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(cudaGetLastError());
}
void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || self.numel() == 0) {
result.fill_(0);
return;
}
const int64_t n = result.size(0);
int64_t m = self.size(1);
const int block_x = 64;
// NB: be careful with changing block_y; as it's currently written, grid_y is limited to be 2^16.
// From binary search, block_y of 16 gives us max pdist dim0 of 1449,
// block_y of 4 gives us max pdist dim0 of 725.
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
Tensor buffer = at::empty({n - 1, result.size(0), result.size(1)}, result.options());
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "pdist_cuda_backward", [&] {
if (p == 1.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p < 2.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(cudaGetLastError());
at::sum_out(result, buffer, 0);
}
void cdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& x1, const Tensor& x2, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || x1.numel() == 0 || x2.numel() == 0) {
result.fill_(0);
return;
}
const int64_t r1 = x1.size(-2);
const int64_t r2 = x2.size(-2);
const int64_t m = x1.size(-1);
const int block_x = 64;
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
const int64_t count = dist.numel();
Tensor buffer = at::empty({r2, r1, m}, result.options());
AT_DISPATCH_FLOATING_TYPES(result.scalar_type(), "cdist_cuda_backward", [&] {
if (p == 1.0) {
cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (p < 2.0) {
cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (p == 2.0) {
cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (std::isinf(p)) {
cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else {
cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
}
});
AT_CUDA_CHECK(cudaGetLastError());
at::sum_out(result, buffer, 0);
}
} // anonymous namespace
REGISTER_DISPATCH(pdist_forward_stub, &pdist_forward_kernel_impl);
REGISTER_DISPATCH(pdist_backward_stub, &pdist_backward_kernel_impl);
REGISTER_DISPATCH(cdist_stub, &cdist_kernel_impl);
REGISTER_DISPATCH(cdist_backward_stub, &cdist_backward_kernel_impl);
}} // at::native
|
1d489d8ce21dc6263a039c1cd018eb7873766edd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif
__global__ void elementwise_1D_1D_square(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in[tid] * in[tid];
}
|
1d489d8ce21dc6263a039c1cd018eb7873766edd.cu
|
#include "includes.h"
using namespace std;
#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif
__global__ void elementwise_1D_1D_square(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in[tid] * in[tid];
}
|
a2b77522ff8181f6f4bd6d07c638da91742fb36c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
extern float toBW(int bytes, float sec);
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
result[index] = alpha * x[index] + y[index];
}
void
saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) {
int totalBytes = sizeof(float) * 3 * N;
// compute number of blocks and threads per block
const int threadsPerBlock = 512;
const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
float* device_x;
float* device_y;
float* device_result;
//
// TODO: allocate device memory buffers on the GPU using
// hipMalloc. The started code issues warnings on build because
// these buffers are used in the call to saxpy_kernel below
// without being initialized.
//
int numBytes = sizeof(float) * N;
hipMalloc(&device_x, numBytes);
hipMalloc(&device_y, numBytes);
hipMalloc(&device_result, numBytes);
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
//
// TODO: copy input arrays to the GPU using hipMemcpy
//
hipMemcpy(device_x, xarray, numBytes, hipMemcpyHostToDevice);
hipMemcpy(device_y, yarray, numBytes, hipMemcpyHostToDevice);
hipMemcpy(device_result, resultarray, numBytes, hipMemcpyHostToDevice);
//
// TODO: insert time here to begin timing only the kernel
//
double startKernelTime = CycleTimer::currentSeconds();
// run saxpy_kernel on the GPU
hipLaunchKernelGGL(( saxpy_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, alpha, device_x, device_y, device_result);
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call hipDeviceSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
//hipDeviceSynchronize();
hipDeviceSynchronize();
double endKernelTime = CycleTimer::currentSeconds();
printf("Total time: %.3f ms\n", 1000.f * (endKernelTime - startKernelTime));
//
// TODO: copy result from GPU using hipMemcpy
//
hipMemcpy(xarray, device_x, numBytes, hipMemcpyDeviceToHost);
hipMemcpy(yarray, device_y, numBytes, hipMemcpyDeviceToHost);
hipMemcpy(resultarray, device_result, numBytes, hipMemcpyDeviceToHost);
// end timing after result has been copied back into host memory.
// The time elapsed between startTime and endTime is the total
// time to copy data to the GPU, run the kernel, and copy the
// result back to the CPU
double endTime = CycleTimer::currentSeconds();
hipError_t errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occurred: code=%d, %s\n", errCode, hipGetErrorString(errCode));
}
double overallDuration = endTime - startTime;
printf("Overall time: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration));
//
// TODO free memory buffers on the GPU
//
hipFree(device_x);
hipFree(device_y);
hipFree(device_result);
}
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
a2b77522ff8181f6f4bd6d07c638da91742fb36c.cu
|
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
extern float toBW(int bytes, float sec);
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
result[index] = alpha * x[index] + y[index];
}
void
saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) {
int totalBytes = sizeof(float) * 3 * N;
// compute number of blocks and threads per block
const int threadsPerBlock = 512;
const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
float* device_x;
float* device_y;
float* device_result;
//
// TODO: allocate device memory buffers on the GPU using
// cudaMalloc. The started code issues warnings on build because
// these buffers are used in the call to saxpy_kernel below
// without being initialized.
//
int numBytes = sizeof(float) * N;
cudaMalloc(&device_x, numBytes);
cudaMalloc(&device_y, numBytes);
cudaMalloc(&device_result, numBytes);
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
//
// TODO: copy input arrays to the GPU using cudaMemcpy
//
cudaMemcpy(device_x, xarray, numBytes, cudaMemcpyHostToDevice);
cudaMemcpy(device_y, yarray, numBytes, cudaMemcpyHostToDevice);
cudaMemcpy(device_result, resultarray, numBytes, cudaMemcpyHostToDevice);
//
// TODO: insert time here to begin timing only the kernel
//
double startKernelTime = CycleTimer::currentSeconds();
// run saxpy_kernel on the GPU
saxpy_kernel<<<blocks, threadsPerBlock>>>(N, alpha, device_x, device_y, device_result);
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call cudaThreadSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
//cudaThreadSynchronize();
cudaThreadSynchronize();
double endKernelTime = CycleTimer::currentSeconds();
printf("Total time: %.3f ms\n", 1000.f * (endKernelTime - startKernelTime));
//
// TODO: copy result from GPU using cudaMemcpy
//
cudaMemcpy(xarray, device_x, numBytes, cudaMemcpyDeviceToHost);
cudaMemcpy(yarray, device_y, numBytes, cudaMemcpyDeviceToHost);
cudaMemcpy(resultarray, device_result, numBytes, cudaMemcpyDeviceToHost);
// end timing after result has been copied back into host memory.
// The time elapsed between startTime and endTime is the total
// time to copy data to the GPU, run the kernel, and copy the
// result back to the CPU
double endTime = CycleTimer::currentSeconds();
cudaError_t errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occurred: code=%d, %s\n", errCode, cudaGetErrorString(errCode));
}
double overallDuration = endTime - startTime;
printf("Overall time: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration));
//
// TODO free memory buffers on the GPU
//
cudaFree(device_x);
cudaFree(device_y);
cudaFree(device_result);
}
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
3d1d6e49c97f8b18ace89bbff0b3d53ff80a7093.hip
|
// !!! This is a file automatically generated by hipify!!!
/********************************************************************
parallel.cu the parallel version of NN
Input:
/usr/local/cuda-10.1/bin/nvcc -arch=compute_52 -o parallel.out parallel.cu
./parallel.out block_size activationtype // block_size = 1 2 ... 32; activationtype=1 means sigmoid and 2 means ReLU
Output:
elapsed_time - the elapsed time to perform the multiplication.
accuracy on training set and test set.
********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <math.h>
#include <iostream>
#include <string>
#include <vector>
#include <fstream>
#include <sstream>
#include <random>
using namespace std;
#define X_trn(x, y) X_trn[x * size_train + y] // 196 * 964
#define X_tst(x, y) X_tst[x * size_test + y] // 196 * 414
#define Y_trn(x, y) Y_trn[x * size_train + y] // 1 * 964
#define Y_tst(x, y) Y_tst[x * size_test + y] // 1 * 414
#define X(x, y) X[x * size_batch + y] // 196 * 964
#define Y(x, y) Y[x * size_batch + y] // 1 * 414
#define W1(x, y) W1[x * size_input + y] // 20 * 196
#define b1(x, y) b1[x * 1 + y] // 20 * 1
#define W2(x, y) W2[x * size_hidden + y] // 2 * 20
#define b2(x, y) b2[x * 1 + y] // 2 * 1
#define dW1(x, y) dW1[x * size_input + y] // 20 * 196
#define db1(x, y) db1[x * 1 + y] // 20 * 1
#define dW2(x, y) dW2[x * size_hidden + y] // 2 * 20
#define db2(x, y) db2[x * 1 + y] // 2 * 1
#define Z1(x, y) Z1[x * size_batch + y] // 20 * 964
#define A1(x, y) A1[x * size_batch + y] // 20 * 964
#define Z2(x, y) Z2[x * size_batch + y] // 2 * 964
#define A2(x, y) A2[x * size_batch + y] // 2 * 964
#define dZ1(x, y) dZ1[x * size_batch + y] // 20 * 964
#define dA1(x, y) dA1[x * size_batch + y] // 20 * 964
#define dZ2(x, y) dZ2[x * size_batch + y] // 2 * 964
#define dA2(x, y) dA2[x * size_batch + y] // 2 * 964
#define max_index(x, y) max_index[y] // 1 * 964
#define dev_X_trn(x, y) dev_X_trn[x * size_train + y] // 196 * 964
#define dev_X_tst(x, y) dev_X_tst[x * size_test + y] // 196 * 414
#define dev_Y_trn(x, y) dev_Y_trn[x * size_train + y] // 1 * 964
#define dev_Y_tst(x, y) dev_Y_tst[x * size_test + y] // 1 * 414
#define dev_X(x, y) dev_X[x * size_batch + y] // 196 * 964
#define dev_Y(x, y) dev_Y[x * size_batch + y] // 1 * 414
#define dev_W1(x, y) dev_W1[x * size_input + y] // 20 * 196
#define dev_b1(x, y) dev_b1[x * 1 + y] // 20 * 1
#define dev_W2(x, y) dev_W2[x * size_hidden + y] // 2 * 20
#define dev_b2(x, y) dev_b2[x * 1 + y] // 2 * 1
#define dev_dW1(x, y) dev_dW1[x * size_input + y] // 20 * 196
#define dev_db1(x, y) dev_db1[x * 1 + y] // 20 * 1
#define dev_dW2(x, y) dev_dW2[x * size_hidden + y] // 2 * 20
#define dev_db2(x, y) dev_db2[x * 1 + y] // 2 * 1
#define dev_Z1(x, y) dev_Z1[x * size_batch + y] // 20 * 964
#define dev_A1(x, y) dev_A1[x * size_batch + y] // 20 * 964
#define dev_Z2(x, y) dev_Z2[x * size_batch + y] // 2 * 964
#define dev_A2(x, y) dev_A2[x * size_batch + y] // 2 * 964
#define dev_dZ1(x, y) dev_dZ1[x * size_batch + y] // 20 * 964
#define dev_dA1(x, y) dev_dA1[x * size_batch + y] // 20 * 964
#define dev_dZ2(x, y) dev_dZ2[x * size_batch + y] // 2 * 964
#define dev_dA2(x, y) dev_dA2[x * size_batch + y] // 2 * 964
#define dev_max_index(x, y) dev_max_index[y] // 1 * 964
#define size_train 964
#define size_test 414
#define size_input 196
#define size_hidden 20
#define size_output 2
#define size_X size_input*size_batch
#define size_Y size_batch
#define size_W1 size_hidden*size_input
#define size_b1 size_hidden*1
#define size_W2 size_output*size_hidden
#define size_b2 size_output*1
#define size_dW1 size_hidden*size_input
#define size_db1 size_hidden*1
#define size_dW2 size_output*size_hidden
#define size_db2 size_output*1
#define size_Z1 size_hidden*size_batch
#define size_A1 size_hidden*size_batch
#define size_Z2 size_output*size_batch
#define size_A2 size_output*size_batch
#define size_dZ1 size_hidden*size_batch
#define size_dA1 size_hidden*size_batch
#define size_dZ2 size_output*size_batch
#define size_dA2 size_output*size_batch
#define size_max_index 1*size_batch
#define size_dev_max_index 1*size_batch
int size_batch = 0;
int *Y_trn, *Y_tst, *max_index, *dev_Y, *dev_max_index;
double *X_trn, *X_tst, *X, *W1, *b1, *W2, *b2, *dW1, *db1, *dW2, *db2, *Z1, *A1, *Z2, *A2, *dZ1, *dA1, *dZ2, *dA2;
double *dev_X, *dev_W1, *dev_b1, *dev_W2, *dev_b2, *dev_dW1, *dev_db1, *dev_dW2, *dev_db2, *dev_Z1, *dev_A1, *dev_Z2, *dev_A2, *dev_dZ1, *dev_dA1, *dev_dZ2, *dev_dA2;
hipStream_t stream1; // declare stream
hipStream_t stream2; // declare stream
hipStream_t stream3; // declare stream
void read_X(string data_path, double* array)
{
ifstream inFile(data_path);
string row;
int p;
p = 0;
string value;
while (getline(inFile, row)){
stringstream col(row);
while (getline(col, value, ',')){
array[p] = stod(value);
p++;
}
}
}
void read_Y(string data_path, int* array)
{
ifstream inFile(data_path);
string row;
int p;
p = 0;
string value;
while (getline(inFile, row)){
stringstream col(row);
while (getline(col, value, ',')){
array[p] = stod(value);
p++;
}
}
}
/* Set the value and reading data */
void read_data()
{
X_trn = (double *) malloc(size_input*size_train * sizeof(double)); // 196*964
Y_trn = (int *) malloc(size_train * sizeof(int)); // 1*964
X_tst = (double *) malloc(size_input*size_test * sizeof(double)); // 196*414
Y_tst = (int *) malloc(size_test * sizeof(int)); // 1*414
string X_trn_path = "X_trn.csv"; // Defined the name of cvs file
string Y_trn_path = "Y_trn.csv";
string X_tst_path = "X_tst.csv";
string Y_tst_path = "Y_tst.csv";
read_X(X_trn_path, X_trn); //Execution
read_Y(Y_trn_path, Y_trn);
read_X(X_tst_path, X_tst);
read_Y(Y_tst_path, Y_tst);
}
/* init W b */
void initialize_Wb() {
W1 = (double *) malloc(size_W1*sizeof(double)); // 20*196
b1 = (double *) malloc(size_b1*sizeof(double)); // 20*1
W2 = (double *) malloc(size_W2*sizeof(double)); // 2*20
b2 = (double *) malloc(size_b2*sizeof(double)); // 2*1
dW1 = (double *) malloc(size_dW1*sizeof(double)); // 20*196
db1 = (double *) malloc(size_db1*sizeof(double)); // 20*1
dW2 = (double *) malloc(size_dW2*sizeof(double)); // 2*20
db2 = (double *) malloc(size_db2*sizeof(double)); // 2*1
default_random_engine e;
uniform_real_distribution<double> u(-1,1);
for (int i = 0; i < size_W1; i++) {
W1[i] = u(e);
}
for (int i = 0; i < size_W2; i++) {
W2[i] = u(e);
}
for (int i = 0; i < size_b1; i++) {
b1[i] = 0;
}
for (int i = 0; i < size_b2; i++) {
b2[i] = 0;
}
}
/* init Z and A in the host */
void initialize_ZA(int size_batch)
{
Z1 = (double *) malloc(size_Z1*sizeof(double)); // 20*964
A1 = (double *) malloc(size_A1*sizeof(double)); // 20*964
Z2 = (double *) malloc(size_Z2*sizeof(double)); // 2*964
A2 = (double *) malloc(size_A2*sizeof(double)); // 2*964
dZ1 = (double *) malloc(size_dZ1*sizeof(double)); // 20*964
dA1 = (double *) malloc(size_dA1*sizeof(double)); // 20*964
dZ2 = (double *) malloc(size_dZ2*sizeof(double)); // 2*964
dA2 = (double *) malloc(size_dA2*sizeof(double)); // 2*964
max_index = (int *) malloc(size_max_index*sizeof(int)); // 1*964
}
/* init Z and A in the device */
void initialize_dev_ZA(int size_batch)
{
hipMalloc((void**)&dev_X, size_X * sizeof(double));
hipMalloc((void**)&dev_Y, size_Y * sizeof(int));
hipMalloc((void**)&dev_max_index, size_dev_max_index * sizeof(int));
hipMalloc((void**)&dev_Z1, size_Z1 * sizeof(double));
hipMalloc((void**)&dev_A1, size_A1 * sizeof(double));
hipMalloc((void**)&dev_Z2, size_Z2 * sizeof(double));
hipMalloc((void**)&dev_A2, size_A2 * sizeof(double));
}
/* free Z and A in the device */
void free_dev_ZA()
{
hipFree(dev_X);
hipFree(dev_Y);
hipFree(dev_max_index);
hipFree(dev_Z1);
hipFree(dev_A1);
hipFree(dev_Z2);
hipFree(dev_A2);
}
/* init W and b in the device */
void initialize_dev_Wb()
{
hipMalloc((void**)&dev_W1, size_W1 * sizeof(double));
hipMalloc((void**)&dev_b1, size_b1 * sizeof(double));
hipMalloc((void**)&dev_W2, size_W2 * sizeof(double));
hipMalloc((void**)&dev_b2, size_b2 * sizeof(double));
hipMalloc((void**)&dev_dW1, size_dW1 * sizeof(double));
hipMalloc((void**)&dev_db1, size_db1 * sizeof(double));
hipMalloc((void**)&dev_dW2, size_dW2 * sizeof(double));
hipMalloc((void**)&dev_db2, size_db2 * sizeof(double));
}
/* free W and b in the device */
void free_dev_Wb()
{
hipFree(dev_W1);
hipFree(dev_b1);
hipFree(dev_W2);
hipFree(dev_b2);
hipFree(dev_dW1);
hipFree(dev_db1);
hipFree(dev_dW2);
hipFree(dev_db2);
}
/* init dZ and dA in the host */
void initialize_dev_dZA(int size_batch)
{
hipMalloc((void**)&dev_dZ1, size_dZ1 * sizeof(double));
hipMalloc((void**)&dev_dA1, size_dA1 * sizeof(double));
hipMalloc((void**)&dev_dZ2, size_dZ2 * sizeof(double));
hipMalloc((void**)&dev_dA2, size_dA2 * sizeof(double));
}
/* free dZ and dA in the device */
void free_dev_dZA()
{
hipFree(dev_dZ1);
hipFree(dev_dA1);
hipFree(dev_dZ2);
hipFree(dev_dA2);
}
__global__ void HiddenLayer_Sigmoid(double* dev_X, double* dev_W1, double* dev_b1, double* dev_A1, double* dev_Z1, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W1(i,k) * dev_X(k,j);
dev_Z1(i,j) = partial + dev_b1(i,0);
dev_A1(i,j) = 1 / (1 + exp(0 - dev_Z1(i,j)));
}
__global__ void HiddenLayer_ReLU(double* dev_X, double* dev_W1, double* dev_b1, double* dev_A1, double* dev_Z1, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W1(i,k) * dev_X(k,j);
dev_Z1(i,j) = partial + dev_b1(i,0);
dev_A1(i,j) = dev_Z1(i,j) * (dev_Z1(i,j) > 0);
}
__global__ void OutputLayer(double* dev_A1, double* dev_W2, double* dev_b2, double* dev_Z2, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W2(i,k) * dev_A1(k,j);
dev_Z2(i,j) = partial + dev_b2(i,0);
}
// parallel for column part
__global__ void Softmax(double* dev_Z2, double* dev_A2, int* dev_max_index, int size_batch, int max_row, int max_col)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(j >= max_col)
return;
double max = dev_Z2(0, j), sum = 0;
dev_max_index[j] = 1;
for (int i = 1; i < max_row; i++) {
if (dev_Z2(i, j) > max){
max = dev_Z2(i, j);
dev_max_index[j] = 0;
}
}
for (int i = 0; i < max_row; i++)
sum += exp(dev_Z2(i, j));
for (int i = 0; i < max_row; i++)
dev_A2(i, j) = exp(dev_Z2(i, j)) / sum;
}
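// Note (added): the Softmax kernel above exponentiates dev_Z2 directly, which can
// overflow for large logits. The kernel below is a hypothetical, illustrative sketch of
// a numerically stable variant that subtracts the per-column max before exponentiating;
// it is not called anywhere in this program.
__global__ void Softmax_stable_example(double* Zin, double* Aout, int size_batch, int max_row, int max_col)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(j >= max_col)
return;
double m = Zin[0 * size_batch + j];
for (int i = 1; i < max_row; i++)
if (Zin[i * size_batch + j] > m) m = Zin[i * size_batch + j]; // column-wise max
double sum = 0;
for (int i = 0; i < max_row; i++)
sum += exp(Zin[i * size_batch + j] - m); // shifted exponentials
for (int i = 0; i < max_row; i++)
Aout[i * size_batch + j] = exp(Zin[i * size_batch + j] - m) / sum;
}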
__global__ void Back_dZ2 (double* dev_A2, int* dev_Y_trn, double* dev_dZ2, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
dev_dZ2(0, j) = (dev_A2(0, j) - dev_Y_trn(0, j)) / size_batch;
dev_dZ2(1, j) = (dev_Y_trn(0, j) - dev_A2(0, j)) / size_batch;
}
// dW1(20*196) = dZ1(20*964) * X(196*964)
// dW2(2*20) = dZ2(2*964) * A1(20*964)
__global__ void Back_dW (double* dev_A, double* dev_dZ, double* dev_dW, int size_batch, int W_col, int max_row, int max_col)
{
int k;
double tmp = 0.0;
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
int j = threadIdx.x + blockIdx.x * blockDim.x; // j/x -> col
if(i >= max_row || j >= max_col)
return;
for (k = 0; k < size_batch; k++)
tmp += dev_dZ[i*size_batch+k] * dev_A[j*size_batch+k];
dev_dW[i*W_col+j] = tmp;
}
// db1(20*1) is from dZ1(20*964)
// db2(2*1) is from dZ1(2*964)
__global__ void Back_db(double* dev_dZ, double* dev_db, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
if(i >= max_row)
return;
double tmp = 0;
for(int j = 0; j < max_col; j++) {
tmp += dev_dZ[i*size_batch+j];
}
dev_db[i*1+0] = tmp;
}
__global__ void Back_dA1 (double* dev_W2, double* dev_dZ2, double* dev_dA1, int size_batch, int K, int max_row, int max_col)
{
// dA1(20*964) = dZ2(2*964) * W2(2*20)
int k;
double partial = 0.0;
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
int j = threadIdx.x + blockIdx.x * blockDim.x; // j/x -> col
if(i >= max_row || j >= max_col)
return;
for (k = 0; k < K; k++)
partial += dev_W2(k,i) * dev_dZ2(k,j);
dev_dA1(i,j) = partial;
}
__global__ void Back_dZ1_Sigmoid (double* dev_dA1, double* dev_A1, double* dev_Z1, double* dev_dZ1, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
dev_dZ1(i, j) = dev_dA1(i, j) * dev_A1(i, j) * (1-dev_A1(i, j)); // dZ1 = dA1*A1*(1-A1)
}
__global__ void Back_dZ1_ReLU (double* dev_dA1, double* dev_A1, double* dev_Z1, double* dev_dZ1, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
if(dev_Z1(i, j) < 0)
dev_dZ1(i, j) = 0;
else
dev_dZ1(i, j) = dev_dA1(i, j); //dZ1 = dA1*Z1_mask
}
__global__ void update_Wb(double* dev_dWb, double* dev_Wb, int col, double learn_rate, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
dev_Wb[i*col+j] = dev_Wb[i*col+j] - learn_rate * dev_dWb[i*col+j];
}
/* forward pass: compute Z1/A1, Z2/A2 and the predicted labels (max_index) */
void forward(double* X, int* Y, string type, int acti_type, int block_size){
if(type == "train"){
size_batch = size_train;
}
else{
size_batch = size_test;
}
// init Z and A in the host
initialize_ZA(size_batch);
// init X Y W b Z A in the device
initialize_dev_ZA(size_batch);
dim3 dimBlock(block_size, block_size);
// hidden layer and activation function to get Z1 and A1
dim3 dimGrid1((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden+ dimBlock.y - 1)/ dimBlock.y);
hipMemcpy(dev_W1, W1, size_W1 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_b1, b1, size_b1 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_X, X, size_X * sizeof(double), hipMemcpyHostToDevice);
if(acti_type == 1)
hipLaunchKernelGGL(( HiddenLayer_Sigmoid), dim3(dimGrid1), dim3(dimBlock), 0, 0, dev_X, dev_W1, dev_b1, dev_A1, dev_Z1, size_input, size_batch, size_hidden, size_batch);
else if(acti_type == 2)
hipLaunchKernelGGL(( HiddenLayer_ReLU), dim3(dimGrid1), dim3(dimBlock), 0, 0, dev_X, dev_W1, dev_b1, dev_A1, dev_Z1, size_input, size_batch, size_hidden, size_batch);
hipMemcpy(Z1, dev_Z1, size_Z1 * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(A1, dev_A1, size_A1 * sizeof(double), hipMemcpyDeviceToHost);
// output layer to get Z2
dim3 dimGrid2((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipMemcpy(dev_W2, W2, size_W2 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_b2, b2, size_b2 * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( OutputLayer), dim3(dimGrid2), dim3(dimBlock), 0, 0, dev_A1, dev_W2, dev_b2, dev_Z2, size_hidden, size_batch, size_output, size_batch);
hipMemcpy(Z2, dev_Z2, size_Z2 * sizeof(double), hipMemcpyDeviceToHost);
// softmax layer to get A2 and max_index
dim3 dimGrid3((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( Softmax), dim3(dimGrid3), dim3(dimBlock), 0, 0, dev_Z2, dev_A2, dev_max_index, size_batch, size_output, size_batch);
hipMemcpy(A2, dev_A2, size_A2 * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(max_index, dev_max_index, size_max_index * sizeof(int), hipMemcpyDeviceToHost);
free_dev_ZA();
}
/* calculate loss */
double cross_entropy_loss(int* Y, double* A2, int col)
{
double loss = 0;
for(int c = 0; c < col; c++) {
loss += -log(A2(0, c)) * Y(0, c) - log(A2(1, c)) * (1-Y(0, c));
}
return loss/col;
}
/* backward to calculate dW db */
void backprop(double* X, int* Y, int acti_type, int block_size) { // type = 1 is Sigmoid
size_batch = size_train;
initialize_dev_ZA(size_batch);
dim3 dimBlock(block_size, block_size);
// get dZ2
dim3 dimGrid1((size_batch + dimBlock.x - 1)/ dimBlock.x, (1 + dimBlock.y - 1)/ dimBlock.y);
hipMemcpyAsync(dev_A2, A2, size_A2 * sizeof(double), hipMemcpyHostToDevice, stream2);
hipMemcpyAsync(dev_Y, Y, size_Y * sizeof(int), hipMemcpyHostToDevice, stream2);
hipMemcpyAsync(dev_dZ2, dZ2, size_dZ2 * sizeof(double), hipMemcpyHostToDevice, stream2);
hipLaunchKernelGGL(( Back_dZ2), dim3(dimGrid1), dim3(dimBlock), 0, stream2, dev_A2, dev_Y, dev_dZ2, size_batch, 1, size_batch);
hipMemcpyAsync(dZ2, dev_dZ2, size_dZ2 * sizeof(double), hipMemcpyDeviceToHost, stream2);
// get dw2
dim3 dimGrid2((size_hidden + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipMemcpyAsync(dev_A1, A1, size_A1 * sizeof(double), hipMemcpyHostToDevice, stream2);
hipLaunchKernelGGL(( Back_dW), dim3(dimGrid2), dim3(dimBlock), 0, stream2, dev_A1, dev_dZ2, dev_dW2, size_batch, size_hidden, size_output, size_hidden);
hipMemcpyAsync(dW2, dev_dW2, size_dW2 * sizeof(double), hipMemcpyDeviceToHost, stream2);
// get db2
dim3 dimGrid3((1 + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( Back_db), dim3(dimGrid3), dim3(dimBlock), 0, stream2, dev_dZ2, dev_db2, size_batch, size_output, size_batch);
hipMemcpyAsync(db2, dev_db2, size_db2 * sizeof(double), hipMemcpyDeviceToHost, stream2);
// get dA1
dim3 dimGrid4((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipMemcpyAsync(dev_W2, W2, size_W2 * sizeof(double), hipMemcpyHostToDevice, stream2);
hipLaunchKernelGGL(( Back_dA1), dim3(dimGrid4), dim3(dimBlock), 0, stream2, dev_W2, dev_dZ2, dev_dA1, size_batch, size_output, size_hidden, size_batch);
hipMemcpyAsync(dA1, dev_dA1, size_dA1 * sizeof(double), hipMemcpyDeviceToHost, stream2);
// get dZ1
dim3 dimGrid5((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipMemcpyAsync(dev_A1, A1, size_A1 * sizeof(double), hipMemcpyHostToDevice, stream2);
hipMemcpyAsync(dev_Z1, Z1, size_Z1 * sizeof(double), hipMemcpyHostToDevice, stream2);
if(acti_type == 1)
hipLaunchKernelGGL(( Back_dZ1_Sigmoid), dim3(dimGrid5), dim3(dimBlock), 0, stream2, dev_dA1, dev_A1, dev_Z1, dev_dZ1, size_batch, size_hidden, size_batch);
else if(acti_type == 2)
hipLaunchKernelGGL(( Back_dZ1_ReLU), dim3(dimGrid5), dim3(dimBlock), 0, stream2, dev_dA1, dev_A1, dev_Z1, dev_dZ1, size_batch, size_hidden, size_batch);
hipMemcpyAsync(dZ1, dev_dZ1, size_dZ1 * sizeof(double), hipMemcpyDeviceToHost, stream2);
// get dW1
dim3 dimGrid6((size_input + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipMemcpyAsync(dev_X, X, size_X * sizeof(double), hipMemcpyHostToDevice, stream2);
hipLaunchKernelGGL(( Back_dW), dim3(dimGrid6), dim3(dimBlock), 0, stream2, dev_X, dev_dZ1, dev_dW1, size_batch, size_input, size_hidden, size_input);
hipMemcpyAsync(dW1, dev_dW1, size_dW1 * sizeof(double), hipMemcpyDeviceToHost, stream2);
// get db1
dim3 dimGrid7((1 + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( Back_db), dim3(dimGrid7), dim3(dimBlock), 0, stream2, dev_dZ1, dev_db1, size_batch, size_hidden, size_batch);
hipMemcpyAsync(db1, dev_db1, size_db1 * sizeof(double), hipMemcpyDeviceToHost, stream2);
// free ZA on device
free_dev_ZA();
}
/* update W b */
void updateParameter(double learn_rate, int block_size)
{
// hipSetDevice(0);
dim3 dimBlock(block_size, block_size);
// update w1
hipMemcpyAsync(dev_dW1, dW1, size_dW1 * sizeof(double), hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(dev_W1, W1, size_W1 * sizeof(double), hipMemcpyHostToDevice, stream1);
dim3 dimGrid1((size_input + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( update_Wb), dim3(dimGrid1), dim3(dimBlock), 0, stream1, dev_dW1, dev_W1, size_input, learn_rate, size_hidden, size_input);
hipMemcpyAsync(W1, dev_W1, size_W1 * sizeof(double), hipMemcpyDeviceToHost, stream1);
// update b1
hipMemcpyAsync(dev_db1, db1, size_db1 * sizeof(double), hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(dev_b1, b1, size_b1 * sizeof(double), hipMemcpyHostToDevice, stream1);
dim3 dimGrid2((1 + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( update_Wb), dim3(dimGrid2), dim3(dimBlock), 0, stream1, dev_db1, dev_b1, 1, learn_rate, size_hidden, 1);
hipMemcpyAsync(b1, dev_b1, size_b1 * sizeof(double), hipMemcpyDeviceToHost, stream1);
// update w2
hipMemcpyAsync(dev_dW2, dW2, size_dW2 * sizeof(double), hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(dev_W2, W2, size_W2 * sizeof(double), hipMemcpyHostToDevice, stream1);
dim3 dimGrid3((size_hidden + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( update_Wb), dim3(dimGrid3), dim3(dimBlock), 0, stream1, dev_dW2, dev_W2, size_hidden, learn_rate, size_output, size_hidden);
hipMemcpyAsync(W2, dev_W2, size_W2 * sizeof(double), hipMemcpyDeviceToHost, stream1);
// update b2
hipMemcpyAsync(dev_db2, db2, size_db2 * sizeof(double), hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(dev_b2, b2, size_b2 * sizeof(double), hipMemcpyHostToDevice, stream1);
dim3 dimGrid4((1 + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( update_Wb), dim3(dimGrid4), dim3(dimBlock), 0, stream1, dev_db2, dev_b2, 1, learn_rate, size_output, 1);
hipMemcpyAsync(b2, dev_b2, size_b2 * sizeof(double), hipMemcpyDeviceToHost, stream1);
// synchronize the stream
hipStreamSynchronize(stream1);
}
double accuracy(int* max_index, int* Y, int size_batch)
{
int i;
double count = 0;
for(i = 0; i < size_batch; i++) {
if(Y(0, i) == max_index(0, i))
count += 1;
}
return count/double(size_batch);
}
void train(double* X_trn, int* Y_trn, int acti_type, int block_size)
{
forward(X_trn, Y_trn, "train", acti_type, block_size);
backprop(X_trn, Y_trn, acti_type, block_size); // 1 Sigmoid 2 ReLU
updateParameter(0.01, block_size);
}
double test(double* X, int* Y, string type, int acti_type, int block_size)
{
forward(X, Y, type, acti_type, block_size);
if(type == "train")
return accuracy(max_index, Y, size_train);
else
return accuracy(max_index, Y, size_test);
}
int main(int argc, char *argv[])
{
int block_size;
int epochs = 20000;
int acti_type;
double acc_trn, acc_tst;
if ( argc < 3 ){
printf(" Usage: first argument: block size \n");
printf(" second argument: activation type \n");
return -1;
} else if ( argc > 3 ) {
printf("\n Too many arguments. \n");
return -1;
} else {
block_size = atoi(argv[1]);
acti_type = atoi(argv[2]);
}
initialize_Wb();
initialize_dev_Wb();
initialize_dev_dZA(size_train);
read_data();
float elapsed_time = 0.0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipStreamCreate (&stream1);
hipStreamCreate (&stream2);
hipStreamCreate (&stream3);
for(int e = 0; e < epochs; e++) {
train(X_trn, Y_trn, acti_type, block_size);
// double loss = cross_entropy_loss(Y_trn, A2, size_train);
// printf("%f\n", loss);
// printf("the %d epoch, the training loss is: %f \n", e, loss);
}
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
hipStreamDestroy(stream3);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
printf( "Elapsed Time: %.4e msec. \n", elapsed_time );
acc_trn = test(X_trn, Y_trn, "train", acti_type, block_size);
acc_tst = test(X_tst, Y_tst, "test", acti_type, block_size);
printf("the training accuracy is: %f, the test accuracy is: %f\n", acc_trn, acc_tst);
free_dev_Wb();
free_dev_dZA();
}
|
3d1d6e49c97f8b18ace89bbff0b3d53ff80a7093.cu
|
/********************************************************************
parallel.cu the parallel version of NN
Input:
/usr/local/cuda-10.1/bin/nvcc -arch=compute_52 -o parallel.out parallel.cu
./parallel.out block_size activationtype // block_size = 1 2 ... 32; activationtype=1 means sigmoid and 2 means ReLU
Output:
elapsed_time - the elapsed time to perform the multiplication.
accuracy on training set and test set.
********************************************************************/
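// Example (hypothetical values): ./parallel.out 16 1 uses 16x16 thread blocks with the sigmoid activation.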
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>
#include <iostream>
#include <string>
#include <vector>
#include <fstream>
#include <sstream>
#include <random>
using namespace std;
#define X_trn(x, y) X_trn[x * size_train + y] // 196 * 964
#define X_tst(x, y) X_tst[x * size_test + y] // 196 * 414
#define Y_trn(x, y) Y_trn[x * size_train + y] // 1 * 964
#define Y_tst(x, y) Y_tst[x * size_test + y] // 1 * 414
#define X(x, y) X[x * size_batch + y] // 196 * 964
#define Y(x, y) Y[x * size_batch + y] // 1 * 414
#define W1(x, y) W1[x * size_input + y] // 20 * 196
#define b1(x, y) b1[x * 1 + y] // 20 * 1
#define W2(x, y) W2[x * size_hidden + y] // 2 * 20
#define b2(x, y) b2[x * 1 + y] // 2 * 1
#define dW1(x, y) dW1[x * size_input + y] // 20 * 196
#define db1(x, y) db1[x * 1 + y] // 20 * 1
#define dW2(x, y) dW2[x * size_hidden + y] // 2 * 20
#define db2(x, y) db2[x * 1 + y] // 2 * 1
#define Z1(x, y) Z1[x * size_batch + y] // 20 * 964
#define A1(x, y) A1[x * size_batch + y] // 20 * 964
#define Z2(x, y) Z2[x * size_batch + y] // 2 * 964
#define A2(x, y) A2[x * size_batch + y] // 2 * 964
#define dZ1(x, y) dZ1[x * size_batch + y] // 20 * 964
#define dA1(x, y) dA1[x * size_batch + y] // 20 * 964
#define dZ2(x, y) dZ2[x * size_batch + y] // 2 * 964
#define dA2(x, y) dA2[x * size_batch + y] // 2 * 964
#define max_index(x, y) max_index[y] // 1 * 964
#define dev_X_trn(x, y) dev_X_trn[x * size_train + y] // 196 * 964
#define dev_X_tst(x, y) dev_X_tst[x * size_test + y] // 196 * 414
#define dev_Y_trn(x, y) dev_Y_trn[x * size_train + y] // 1 * 964
#define dev_Y_tst(x, y) dev_Y_tst[x * size_test + y] // 1 * 414
#define dev_X(x, y) dev_X[x * size_batch + y] // 196 * 964
#define dev_Y(x, y) dev_Y[x * size_batch + y] // 1 * 414
#define dev_W1(x, y) dev_W1[x * size_input + y] // 20 * 196
#define dev_b1(x, y) dev_b1[x * 1 + y] // 20 * 1
#define dev_W2(x, y) dev_W2[x * size_hidden + y] // 2 * 20
#define dev_b2(x, y) dev_b2[x * 1 + y] // 2 * 1
#define dev_dW1(x, y) dev_dW1[x * size_input + y] // 20 * 196
#define dev_db1(x, y) dev_db1[x * 1 + y] // 20 * 1
#define dev_dW2(x, y) dev_dW2[x * size_hidden + y] // 2 * 20
#define dev_db2(x, y) dev_db2[x * 1 + y] // 2 * 1
#define dev_Z1(x, y) dev_Z1[x * size_batch + y] // 20 * 964
#define dev_A1(x, y) dev_A1[x * size_batch + y] // 20 * 964
#define dev_Z2(x, y) dev_Z2[x * size_batch + y] // 2 * 964
#define dev_A2(x, y) dev_A2[x * size_batch + y] // 2 * 964
#define dev_dZ1(x, y) dev_dZ1[x * size_batch + y] // 20 * 964
#define dev_dA1(x, y) dev_dA1[x * size_batch + y] // 20 * 964
#define dev_dZ2(x, y) dev_dZ2[x * size_batch + y] // 2 * 964
#define dev_dA2(x, y) dev_dA2[x * size_batch + y] // 2 * 964
#define dev_max_index(x, y) dev_max_index[y] // 1 * 964
#define size_train 964
#define size_test 414
#define size_input 196
#define size_hidden 20
#define size_output 2
#define size_X size_input*size_batch
#define size_Y size_batch
#define size_W1 size_hidden*size_input
#define size_b1 size_hidden*1
#define size_W2 size_output*size_hidden
#define size_b2 size_output*1
#define size_dW1 size_hidden*size_input
#define size_db1 size_hidden*1
#define size_dW2 size_output*size_hidden
#define size_db2 size_output*1
#define size_Z1 size_hidden*size_batch
#define size_A1 size_hidden*size_batch
#define size_Z2 size_output*size_batch
#define size_A2 size_output*size_batch
#define size_dZ1 size_hidden*size_batch
#define size_dA1 size_hidden*size_batch
#define size_dZ2 size_output*size_batch
#define size_dA2 size_output*size_batch
#define size_max_index 1*size_batch
#define size_dev_max_index 1*size_batch
int size_batch = 0;
int *Y_trn, *Y_tst, *max_index, *dev_Y, *dev_max_index;
double *X_trn, *X_tst, *X, *W1, *b1, *W2, *b2, *dW1, *db1, *dW2, *db2, *Z1, *A1, *Z2, *A2, *dZ1, *dA1, *dZ2, *dA2;
double *dev_X, *dev_W1, *dev_b1, *dev_W2, *dev_b2, *dev_dW1, *dev_db1, *dev_dW2, *dev_db2, *dev_Z1, *dev_A1, *dev_Z2, *dev_A2, *dev_dZ1, *dev_dA1, *dev_dZ2, *dev_dA2;
cudaStream_t stream1; // declare stream
cudaStream_t stream2; // declare stream
cudaStream_t stream3; // declare stream
void read_X(string data_path, double* array)
{
ifstream inFile(data_path);
string row;
int p;
p = 0;
string value;
while (getline(inFile, row)){
stringstream col(row);
while (getline(col, value, ',')){
array[p] = stod(value);
p++;
}
}
}
void read_Y(string data_path, int* array)
{
ifstream inFile(data_path);
string row;
int p;
p = 0;
string value;
while (getline(inFile, row)){
stringstream col(row);
while (getline(col, value, ',')){
array[p] = stod(value);
p++;
}
}
}
/* Read the training and test data from the csv files */
void read_data()
{
X_trn = (double *) malloc(size_input*size_train * sizeof(double)); // 196*964
Y_trn = (int *) malloc(size_train * sizeof(int)); // 1*964
X_tst = (double *) malloc(size_input*size_test * sizeof(double)); // 196*414
Y_tst = (int *) malloc(size_test * sizeof(int)); // 1*414
string X_trn_path = "X_trn.csv"; // Defined the name of cvs file
string Y_trn_path = "Y_trn.csv";
string X_tst_path = "X_tst.csv";
string Y_tst_path = "Y_tst.csv";
read_X(X_trn_path, X_trn); //Execution
read_Y(Y_trn_path, Y_trn);
read_X(X_tst_path, X_tst);
read_Y(Y_tst_path, Y_tst);
}
/* init W b */
void initialize_Wb() {
W1 = (double *) malloc(size_W1*sizeof(double)); // 20*196
b1 = (double *) malloc(size_b1*sizeof(double)); // 20*1
W2 = (double *) malloc(size_W2*sizeof(double)); // 2*20
b2 = (double *) malloc(size_b2*sizeof(double)); // 2*1
dW1 = (double *) malloc(size_dW1*sizeof(double)); // 20*196
db1 = (double *) malloc(size_db1*sizeof(double)); // 20*1
dW2 = (double *) malloc(size_dW2*sizeof(double)); // 2*20
db2 = (double *) malloc(size_db2*sizeof(double)); // 2*1
default_random_engine e;
uniform_real_distribution<double> u(-1,1);
for (int i = 0; i < size_W1; i++) {
W1[i] = u(e);
}
for (int i = 0; i < size_W2; i++) {
W2[i] = u(e);
}
for (int i = 0; i < size_b1; i++) {
b1[i] = 0;
}
for (int i = 0; i < size_b2; i++) {
b2[i] = 0;
}
}
/* init Z and A in the host */
void initialize_ZA(int size_batch)
{
Z1 = (double *) malloc(size_Z1*sizeof(double)); // 20*964
A1 = (double *) malloc(size_A1*sizeof(double)); // 20*964
Z2 = (double *) malloc(size_Z2*sizeof(double)); // 2*964
A2 = (double *) malloc(size_A2*sizeof(double)); // 2*964
dZ1 = (double *) malloc(size_dZ1*sizeof(double)); // 20*964
dA1 = (double *) malloc(size_dA1*sizeof(double)); // 20*964
dZ2 = (double *) malloc(size_dZ2*sizeof(double)); // 2*964
dA2 = (double *) malloc(size_dA2*sizeof(double)); // 2*964
max_index = (int *) malloc(size_max_index*sizeof(int)); // 1*964
}
/* init Z and A in the device */
void initialize_dev_ZA(int size_batch)
{
cudaMalloc((void**)&dev_X, size_X * sizeof(double));
cudaMalloc((void**)&dev_Y, size_Y * sizeof(int));
cudaMalloc((void**)&dev_max_index, size_dev_max_index * sizeof(int));
cudaMalloc((void**)&dev_Z1, size_Z1 * sizeof(double));
cudaMalloc((void**)&dev_A1, size_A1 * sizeof(double));
cudaMalloc((void**)&dev_Z2, size_Z2 * sizeof(double));
cudaMalloc((void**)&dev_A2, size_A2 * sizeof(double));
}
/* free Z and A in the device */
void free_dev_ZA()
{
cudaFree(dev_X);
cudaFree(dev_Y);
cudaFree(dev_max_index);
cudaFree(dev_Z1);
cudaFree(dev_A1);
cudaFree(dev_Z2);
cudaFree(dev_A2);
}
/* init W and b in the device */
void initialize_dev_Wb()
{
cudaMalloc((void**)&dev_W1, size_W1 * sizeof(double));
cudaMalloc((void**)&dev_b1, size_b1 * sizeof(double));
cudaMalloc((void**)&dev_W2, size_W2 * sizeof(double));
cudaMalloc((void**)&dev_b2, size_b2 * sizeof(double));
cudaMalloc((void**)&dev_dW1, size_dW1 * sizeof(double));
cudaMalloc((void**)&dev_db1, size_db1 * sizeof(double));
cudaMalloc((void**)&dev_dW2, size_dW2 * sizeof(double));
cudaMalloc((void**)&dev_db2, size_db2 * sizeof(double));
}
/* free W and b in the device */
void free_dev_Wb()
{
cudaFree(dev_W1);
cudaFree(dev_b1);
cudaFree(dev_W2);
cudaFree(dev_b2);
cudaFree(dev_dW1);
cudaFree(dev_db1);
cudaFree(dev_dW2);
cudaFree(dev_db2);
}
/* init dZ and dA in the device */
void initialize_dev_dZA(int size_batch)
{
cudaMalloc((void**)&dev_dZ1, size_dZ1 * sizeof(double));
cudaMalloc((void**)&dev_dA1, size_dA1 * sizeof(double));
cudaMalloc((void**)&dev_dZ2, size_dZ2 * sizeof(double));
cudaMalloc((void**)&dev_dA2, size_dA2 * sizeof(double));
}
/* free dZ and dA in the device */
void free_dev_dZA()
{
cudaFree(dev_dZ1);
cudaFree(dev_dA1);
cudaFree(dev_dZ2);
cudaFree(dev_dA2);
}
__global__ void HiddenLayer_Sigmoid(double* dev_X, double* dev_W1, double* dev_b1, double* dev_A1, double* dev_Z1, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W1(i,k) * dev_X(k,j);
dev_Z1(i,j) = partial + dev_b1(i,0);
dev_A1(i,j) = 1 / (1 + exp(0 - dev_Z1(i,j)));
}
__global__ void HiddenLayer_ReLU(double* dev_X, double* dev_W1, double* dev_b1, double* dev_A1, double* dev_Z1, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W1(i,k) * dev_X(k,j);
dev_Z1(i,j) = partial + dev_b1(i,0);
dev_A1(i,j) = dev_Z1(i,j) * (dev_Z1(i,j) > 0);
}
__global__ void OutputLayer(double* dev_A1, double* dev_W2, double* dev_b2, double* dev_Z2, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W2(i,k) * dev_A1(k,j);
dev_Z2(i,j) = partial + dev_b2(i,0);
}
// Softmax over each column of Z2; one thread handles one column (only the x index is used)
__global__ void Softmax(double* dev_Z2, double* dev_A2, int* dev_max_index, int size_batch, int max_row, int max_col)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(j >= max_col)
return;
double max = dev_Z2(0, j), sum = 0;
dev_max_index[j] = 1; // label 1 corresponds to output row 0 holding the largest score (two-class encoding)
for (int i = 1; i < max_row; i++) {
if (dev_Z2(i, j) > max){
max = dev_Z2(i, j);
dev_max_index[j] = 0;
}
}
for (int i = 0; i < max_row; i++)
sum += exp(dev_Z2(i, j));
for (int i = 0; i < max_row; i++)
dev_A2(i, j) = exp(dev_Z2(i, j)) / sum;
}
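// Back_dZ2 applies the softmax + cross-entropy gradient w.r.t. the logits (a sketch of the math):
// dL/dZ2(0,j) = (A2(0,j) - Y(0,j)) / size_batch, and for this two-class network row 1 is just the
// negated value of row 0, hence the sign flip below.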
__global__ void Back_dZ2 (double* dev_A2, int* dev_Y_trn, double* dev_dZ2, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
dev_dZ2(0, j) = (dev_A2(0, j) - dev_Y_trn(0, j)) / size_batch;
dev_dZ2(1, j) = (dev_Y_trn(0, j) - dev_A2(0, j)) / size_batch;
}
// dW1(20*196) = dZ1(20*964) * X(196*964)
// dW2(2*20) = dZ2(2*964) * A1(20*964)
__global__ void Back_dW (double* dev_A, double* dev_dZ, double* dev_dW, int size_batch, int W_col, int max_row, int max_col)
{
int k;
double tmp = 0.0;
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
int j = threadIdx.x + blockIdx.x * blockDim.x; // j/x -> col
if(i >= max_row || j >= max_col)
return;
for (k = 0; k < size_batch; k++)
tmp += dev_dZ[i*size_batch+k] * dev_A[j*size_batch+k];
dev_dW[i*W_col+j] = tmp;
}
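// Each thread computes one element dW(i,j) = sum_k dZ(i,k) * A(j,k), i.e. dW = dZ * A^T,
// with both operands stored row-major and the batch index k as the inner dimension.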
// db1(20*1) is from dZ1(20*964)
// db2(2*1) is from dZ1(2*964)
__global__ void Back_db(double* dev_dZ, double* dev_db, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
if(i >= max_row)
return;
double tmp = 0;
for(int j = 0; j < max_col; j++) {
tmp += dev_dZ[i*size_batch+j];
}
dev_db[i*1+0] = tmp;
}
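// Each thread reduces one row of dZ over the batch: db(i) = sum_j dZ(i,j).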
__global__ void Back_dA1 (double* dev_W2, double* dev_dZ2, double* dev_dA1, int size_batch, int K, int max_row, int max_col)
{
// dA1(20*964) = dZ2(2*964) * W2(2*20)
int k;
double partial = 0.0;
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
int j = threadIdx.x + blockIdx.x * blockDim.x; // j/x -> col
if(i >= max_row || j >= max_col)
return;
for (k = 0; k < K; k++)
partial += dev_W2(k,i) * dev_dZ2(k,j);
dev_dA1(i,j) = partial;
}
__global__ void Back_dZ1_Sigmoid (double* dev_dA1, double* dev_A1, double* dev_Z1, double* dev_dZ1, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
dev_dZ1(i, j) = dev_dA1(i, j) * dev_A1(i, j) * (1-dev_A1(i, j)); // dZ1 = dA1*A1*(1-A1)
}
__global__ void Back_dZ1_ReLU (double* dev_dA1, double* dev_A1, double* dev_Z1, double* dev_dZ1, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
if(dev_Z1(i, j) < 0)
dev_dZ1(i, j) = 0;
else
dev_dZ1(i, j) = dev_dA1(i, j); //dZ1 = dA1*Z1_mask
}
__global__ void update_Wb(double* dev_dWb, double* dev_Wb, int col, double learn_rate, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
dev_Wb[i*col+j] = dev_Wb[i*col+j] - learn_rate * dev_dWb[i*col+j];
}
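// Plain full-batch gradient-descent step, applied element-wise: W <- W - learn_rate * dW.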
/* forward pass: compute Z1/A1, Z2/A2 and the predicted labels (max_index) */
void forward(double* X, int* Y, string type, int acti_type, int block_size){
if(type == "train"){
size_batch = size_train;
}
else{
size_batch = size_test;
}
// init Z and A in the host
initialize_ZA(size_batch);
// init X Y W b Z A in the device
initialize_dev_ZA(size_batch);
dim3 dimBlock(block_size, block_size);
// hidden layer and activation function to get Z1 and A1
dim3 dimGrid1((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden+ dimBlock.y - 1)/ dimBlock.y);
cudaMemcpy(dev_W1, W1, size_W1 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b1, b1, size_b1 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_X, X, size_X * sizeof(double), cudaMemcpyHostToDevice);
if(acti_type == 1)
HiddenLayer_Sigmoid<<<dimGrid1, dimBlock>>>(dev_X, dev_W1, dev_b1, dev_A1, dev_Z1, size_input, size_batch, size_hidden, size_batch);
else if(acti_type == 2)
HiddenLayer_ReLU<<<dimGrid1, dimBlock>>>(dev_X, dev_W1, dev_b1, dev_A1, dev_Z1, size_input, size_batch, size_hidden, size_batch);
cudaMemcpy(Z1, dev_Z1, size_Z1 * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(A1, dev_A1, size_A1 * sizeof(double), cudaMemcpyDeviceToHost);
// output layer to get Z2
dim3 dimGrid2((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpy(dev_W2, W2, size_W2 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b2, b2, size_b2 * sizeof(double), cudaMemcpyHostToDevice);
OutputLayer<<<dimGrid2, dimBlock>>>(dev_A1, dev_W2, dev_b2, dev_Z2, size_hidden, size_batch, size_output, size_batch);
cudaMemcpy(Z2, dev_Z2, size_Z2 * sizeof(double), cudaMemcpyDeviceToHost);
// softmax layer to get A2 and max_index
dim3 dimGrid3((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
Softmax<<<dimGrid3, dimBlock>>>(dev_Z2, dev_A2, dev_max_index, size_batch, size_output, size_batch);
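// Note: Softmax only uses the x index, so a 1-D launch would also be enough, e.g. (sketch, not used here):
// Softmax<<<(size_batch + 255) / 256, 256>>>(dev_Z2, dev_A2, dev_max_index, size_batch, size_output, size_batch);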
cudaMemcpy(A2, dev_A2, size_A2 * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(max_index, dev_max_index, size_max_index * sizeof(int), cudaMemcpyDeviceToHost);
free_dev_ZA();
}
/* calculate loss */
double cross_entropy_loss(int* Y, double* A2, int col)
{
double loss = 0;
for(int c = 0; c < col; c++) {
loss += -log(A2(0, c)) * Y(0, c) - log(A2(1, c)) * (1-Y(0, c));
}
return loss/col;
}
/* backward to calculate dW db */
void backprop(double* X, int* Y, int acti_type, int block_size) { // type = 1 is Sigmoid
size_batch = size_train;
initialize_dev_ZA(size_batch);
dim3 dimBlock(block_size, block_size);
// get dZ2
dim3 dimGrid1((size_batch + dimBlock.x - 1)/ dimBlock.x, (1 + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpyAsync(dev_A2, A2, size_A2 * sizeof(double), cudaMemcpyHostToDevice, stream2);
cudaMemcpyAsync(dev_Y, Y, size_Y * sizeof(int), cudaMemcpyHostToDevice, stream2);
cudaMemcpyAsync(dev_dZ2, dZ2, size_dZ2 * sizeof(double), cudaMemcpyHostToDevice, stream2);
Back_dZ2<<<dimGrid1, dimBlock, 0, stream2>>>(dev_A2, dev_Y, dev_dZ2, size_batch, 1, size_batch);
cudaMemcpyAsync(dZ2, dev_dZ2, size_dZ2 * sizeof(double), cudaMemcpyDeviceToHost, stream2);
// get dw2
dim3 dimGrid2((size_hidden + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpyAsync(dev_A1, A1, size_A1 * sizeof(double), cudaMemcpyHostToDevice, stream2);
Back_dW<<<dimGrid2, dimBlock, 0, stream2>>>(dev_A1, dev_dZ2, dev_dW2, size_batch, size_hidden, size_output, size_hidden);
cudaMemcpyAsync(dW2, dev_dW2, size_dW2 * sizeof(double), cudaMemcpyDeviceToHost, stream2);
// get db2
dim3 dimGrid3((1 + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
Back_db<<<dimGrid3, dimBlock, 0, stream2>>>(dev_dZ2, dev_db2, size_batch, size_output, size_batch);
cudaMemcpyAsync(db2, dev_db2, size_db2 * sizeof(double), cudaMemcpyDeviceToHost, stream2);
// get dA1
dim3 dimGrid4((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpyAsync(dev_W2, W2, size_W2 * sizeof(double), cudaMemcpyHostToDevice, stream2);
Back_dA1<<<dimGrid4, dimBlock, 0, stream2>>> (dev_W2, dev_dZ2, dev_dA1, size_batch, size_output, size_hidden, size_batch);
cudaMemcpyAsync(dA1, dev_dA1, size_dA1 * sizeof(double), cudaMemcpyDeviceToHost, stream2);
// get dZ1
dim3 dimGrid5((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpyAsync(dev_A1, A1, size_A1 * sizeof(double), cudaMemcpyHostToDevice, stream2);
cudaMemcpyAsync(dev_Z1, Z1, size_Z1 * sizeof(double), cudaMemcpyHostToDevice, stream2);
if(acti_type == 1)
Back_dZ1_Sigmoid<<<dimGrid5, dimBlock, 0, stream2>>>(dev_dA1, dev_A1, dev_Z1, dev_dZ1, size_batch, size_hidden, size_batch);
else if(acti_type == 2)
Back_dZ1_ReLU<<<dimGrid5, dimBlock, 0, stream2>>>(dev_dA1, dev_A1, dev_Z1, dev_dZ1, size_batch, size_hidden, size_batch);
cudaMemcpyAsync(dZ1, dev_dZ1, size_dZ1 * sizeof(double), cudaMemcpyDeviceToHost, stream2);
// get dW1
dim3 dimGrid6((size_input + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpyAsync(dev_X, X, size_X * sizeof(double), cudaMemcpyHostToDevice, stream2);
Back_dW<<<dimGrid6, dimBlock, 0, stream2>>>(dev_X, dev_dZ1, dev_dW1, size_batch, size_input, size_hidden, size_input);
cudaMemcpyAsync(dW1, dev_dW1, size_dW1 * sizeof(double), cudaMemcpyDeviceToHost, stream2);
// get db1
dim3 dimGrid7((1 + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
Back_db<<<dimGrid7, dimBlock, 0, stream2>>>(dev_dZ1, dev_db1, size_batch, size_hidden, size_batch);
cudaMemcpyAsync(db1, dev_db1, size_db1 * sizeof(double), cudaMemcpyDeviceToHost, stream2);
// free ZA on device
free_dev_ZA();
}
/* update W b */
void updateParameter(double learn_rate, int block_size)
{
// cudaSetDevice(0);
dim3 dimBlock(block_size, block_size);
// update w1
cudaMemcpyAsync(dev_dW1, dW1, size_dW1 * sizeof(double), cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(dev_W1, W1, size_W1 * sizeof(double), cudaMemcpyHostToDevice, stream1);
dim3 dimGrid1((size_input + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
update_Wb<<<dimGrid1, dimBlock, 0, stream1>>>(dev_dW1, dev_W1, size_input, learn_rate, size_hidden, size_input);
cudaMemcpyAsync(W1, dev_W1, size_W1 * sizeof(double), cudaMemcpyDeviceToHost, stream1);
// update b1
cudaMemcpyAsync(dev_db1, db1, size_db1 * sizeof(double), cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(dev_b1, b1, size_b1 * sizeof(double), cudaMemcpyHostToDevice, stream1);
dim3 dimGrid2((1 + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
update_Wb<<<dimGrid2, dimBlock, 0, stream1>>>(dev_db1, dev_b1, 1, learn_rate, size_hidden, 1);
cudaMemcpyAsync(b1, dev_b1, size_b1 * sizeof(double), cudaMemcpyDeviceToHost, stream1);
// update w2
cudaMemcpyAsync(dev_dW2, dW2, size_dW2 * sizeof(double), cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(dev_W2, W2, size_W2 * sizeof(double), cudaMemcpyHostToDevice, stream1);
dim3 dimGrid3((size_hidden + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
update_Wb<<<dimGrid3, dimBlock, 0, stream1>>>(dev_dW2, dev_W2, size_hidden, learn_rate, size_output, size_hidden);
cudaMemcpyAsync(W2, dev_W2, size_W2 * sizeof(double), cudaMemcpyDeviceToHost, stream1);
// update b2
cudaMemcpyAsync(dev_db2, db2, size_db2 * sizeof(double), cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(dev_b2, b2, size_b2 * sizeof(double), cudaMemcpyHostToDevice, stream1);
dim3 dimGrid4((1 + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
update_Wb<<<dimGrid4, dimBlock, 0, stream1>>>(dev_db2, dev_b2, 1, learn_rate, size_output, 1);
cudaMemcpyAsync(b2, dev_b2, size_b2 * sizeof(double), cudaMemcpyDeviceToHost, stream1);
// synchronize the stream
cudaStreamSynchronize(stream1);
}
double accuracy(int* max_index, int* Y, int size_batch)
{
int i;
double count = 0;
for(i = 0; i < size_batch; i++) {
if(Y(0, i) == max_index(0, i))
count += 1;
}
return count/double(size_batch);
}
void train(double* X_trn, int* Y_trn, int acti_type, int block_size)
{
forward(X_trn, Y_trn, "train", acti_type, block_size);
backprop(X_trn, Y_trn, acti_type, block_size); // 1 Sigmoid 2 ReLU
updateParameter(0.01, block_size);
}
double test(double* X, int* Y, string type, int acti_type, int block_size)
{
forward(X, Y, type, acti_type, block_size);
if(type == "train")
return accuracy(max_index, Y, size_train);
else
return accuracy(max_index, Y, size_test);
}
int main(int argc, char *argv[])
{
int block_size;
int epochs = 20000;
int acti_type;
double acc_trn, acc_tst;
if ( argc < 3 ){
printf(" Usage: first argument: block size \n");
printf(" second argument: activation type \n");
return -1;
} else if ( argc > 3 ) {
printf("\n Too many arguments. \n");
return -1;
} else {
block_size = atoi(argv[1]);
acti_type = atoi(argv[2]);
}
initialize_Wb();
initialize_dev_Wb();
initialize_dev_dZA(size_train);
read_data();
float elapsed_time = 0.0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaStreamCreate (&stream1);
cudaStreamCreate (&stream2);
cudaStreamCreate (&stream3);
for(int e = 0; e < epochs; e++) {
train(X_trn, Y_trn, acti_type, block_size);
// double loss = cross_entropy_loss(Y_trn, A2, size_train);
// printf("%f\n", loss);
// printf("the %d epoch, the training loss is: %f \n", e, loss);
}
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf( "Elapsed Time: %.4e msec. \n", elapsed_time );
acc_trn = test(X_trn, Y_trn, "train", acti_type, block_size);
acc_tst = test(X_tst, Y_tst, "test", acti_type, block_size);
printf("the training accuracy is: %f, the test accuracy is: %f\n", acc_trn, acc_tst);
free_dev_Wb();
free_dev_dZA();
}
|
c267c7ce01a95d60589601c3d3899c1cfeb2ca9b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "svt_utils.h"
/* __global__ void gf_unpack(unsigned int start_word, int *d_ids, int *d_out1, int *d_out2, int *d_out3, struct evt_arrays *evt_dev){
int evt = threadIdx.x;
bool gf_xft = 0;
int id_last = -1;
int id = d_ids[start_word];
if (id == XFT_LYR_2) { // compatibility - stp
id = XFT_LYR;
gf_xft = 1;
}
int nroads = evt_dev->evt_nroads[evt];
int nhits = evt_dev->evt_nhits[evt][nroads][id];
// SVX Data <------------------------- DA QUI
if (id < XFT_LYR) {
int zid = d_out1[i];
int lcl = d_out2[i];
int hit = d_out3[i];
evt_dev->evt_hit[evt][nroads][id][nhits] = hit;
evt_dev->evt_hitZ[evt][nroads][id][nhits] = zid;
evt_dev->evt_lcl[evt][nroads][id][nhits] = lcl;
evt_dev->evt_lclforcut[evt][nroads][id][nhits] = lcl;
evt_dev->evt_layerZ[evt][nroads][id] = zid;
if (evt_dev->evt_zid[evt][nroads] == -1) {
evt_dev->evt_zid[evt][nroads] = zid & gf_mask(GF_SUBZ_WIDTH);
} else {
evt_dev->evt_zid[evt][nroads] = (((zid & gf_mask(GF_SUBZ_WIDTH)) << GF_SUBZ_WIDTH)
+ (evt_dev->evt_zid[evt][nroads] & gf_mask(GF_SUBZ_WIDTH)));
}
nhits = ++evt_dev->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) evt_dev->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) evt_dev->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == XFT_LYR && gf_xft == 0) {
// we ignore - stp
} else if (id == XFT_LYR && gf_xft == 1) {
int crv = d_out1[i];
int crv_sign = d_out2[i];
int phi = d_out3[i];
evt_dev->evt_crv[evt][nroads][nhits] = crv;
evt_dev->evt_crv_sign[evt][nroads][nhits] = crv_sign;
evt_dev->evt_phi[evt][nroads][nhits] = phi;
nhits = ++evt_dev->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) evt_dev->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) evt_dev->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == EP_LYR) {
int sector = d_out1[i];
int amroad = d_out2[i];
evt_dev->evt_cable_sect[evt][nroads] = sector;
evt_dev->evt_sect[evt][nroads] = sector;
evt_dev->evt_road[evt][nroads] = amroad;
evt_dev->evt_err_sum[evt] |= evt_dev->evt_err[evt][nroads];
nroads = ++evt_dev->evt_nroads[evt];
for (id = 0; id <= XFT_LYR; id++)
evt_dev->evt_nhits[evt][nroads][id] = 0;
evt_dev->evt_err[evt][nroads] = 0;
evt_dev->evt_zid[evt][nroads] = -1;
id = -1; id_last = -1;
} else if (id == EE_LYR) {
evt_dev->evt_ee_word[evt] = d_out[i];
atomicAdd(&tEvts, 1);
id = -1; id_last = -1;
} else {
evt_dev->evt_err[evt][nroads] |= (1 << INV_DATA_BIT);
}
id_last = id;
}*/
// Unpack the events data and return the number of events read
unsigned int gf_unpack_cuda_GPU(int *ids, int *out1, int *out2, int *out3, int n_words, struct evt_arrays *evt_dev, int* d_tEvts, struct evt_arrays *evta, unsigned int *start_word, hipStream_t stream, hipEvent_t event ) {
MY_CUDA_CHECK(hipStreamSynchronize(stream));
/*MY_CUDA_CHECK(hipStreamWaitEvent(stream, event, 0));
hipMemcpyAsync(evt_dev, evta, sizeof(struct evt_arrays), hipMemcpyHostToDevice, stream);*/
unsigned int tEvts=0;
///////////////// now fill evt (gf_fep_unpack)
memset(evta->evt_nroads, 0, sizeof(evta->evt_nroads));
memset(evta->evt_err_sum, 0, sizeof(evta->evt_err_sum));
memset(evta->evt_layerZ, 0, sizeof(evta->evt_layerZ));
memset(evta->evt_nhits, 0, sizeof(evta->evt_nhits));
memset(evta->evt_err, 0, sizeof(evta->evt_err));
memset(evta->evt_zid, 0, sizeof(evta->evt_zid));
for (int ie = 0; ie < NEVTS; ie++) {
evta->evt_zid[ie][evta->evt_nroads[ie]] = -1; // because we set it to 0 for GPU version
}
int id_last = -1;
int evt = EVT;
int id;
unsigned int i = 0;
if((start_word != NULL) ){
if(*start_word < n_words){
i = *start_word;
}else{
printf("gf_unpack_cuda_GPU ERROR: *start_word is >= than n_words; starting from zero\n");
}
}//start from zero if NULL
do {
id = ids[i];
bool gf_xft = 0;
if (id == XFT_LYR_2) { // compatibility - stp
id = XFT_LYR;
gf_xft = 1;
}
int nroads = evta->evt_nroads[evt];
int nhits = evta->evt_nhits[evt][nroads][id];
// SVX Data
if (id < XFT_LYR) {
int zid = out1[i];
int lcl = out2[i];
int hit = out3[i];
evta->evt_hit[evt][nroads][id][nhits] = hit;
evta->evt_hitZ[evt][nroads][id][nhits] = zid;
evta->evt_lcl[evt][nroads][id][nhits] = lcl;
evta->evt_lclforcut[evt][nroads][id][nhits] = lcl;
evta->evt_layerZ[evt][nroads][id] = zid;
if (evta->evt_zid[evt][nroads] == -1) {
evta->evt_zid[evt][nroads] = zid & gf_mask(GF_SUBZ_WIDTH);
} else {
evta->evt_zid[evt][nroads] = (((zid & gf_mask(GF_SUBZ_WIDTH)) << GF_SUBZ_WIDTH)
+ (evta->evt_zid[evt][nroads] & gf_mask(GF_SUBZ_WIDTH)));
}
nhits = ++evta->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) evta->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) evta->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == XFT_LYR && gf_xft == 0) {
// we ignore - stp
} else if (id == XFT_LYR && gf_xft == 1) {
int crv = out1[i];
int crv_sign = out2[i];
int phi = out3[i];
evta->evt_crv[evt][nroads][nhits] = crv;
evta->evt_crv_sign[evt][nroads][nhits] = crv_sign;
evta->evt_phi[evt][nroads][nhits] = phi;
nhits = ++evta->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) evta->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) evta->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == EP_LYR) {
int sector = out1[i];
int amroad = out2[i];
evta->evt_cable_sect[evt][nroads] = sector;
evta->evt_sect[evt][nroads] = sector;
evta->evt_road[evt][nroads] = amroad;
evta->evt_err_sum[evt] |= evta->evt_err[evt][nroads];
nroads = ++evta->evt_nroads[evt];
if (nroads > MAXROAD) {
printf("The limit on the number of roads fitted by the TF is %d\n",MAXROAD);
printf("You reached that limit evt->nroads = %d\n",nroads);
}
for (id = 0; id <= XFT_LYR; id++)
evta->evt_nhits[evt][nroads][id] = 0;
evta->evt_err[evt][nroads] = 0;
evta->evt_zid[evt][nroads] = -1;
id = -1; id_last = -1;
} else if (id == EE_LYR) {
evta->evt_ee_word[evt] = out1[i];
tEvts++;
evt++;
id = -1; id_last = -1;
} else {
printf("Error INV_DATA_BIT: layer = %u\n", id);
evta->evt_err[evt][nroads] |= (1 << INV_DATA_BIT);
}
id_last = id;
//increment words counter
i++;
}while((i < n_words) && (tEvts < NEVTS)); //end loop on input words when tEvts == NEVTS
hipMemcpyAsync(evt_dev, evta, sizeof(struct evt_arrays), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(d_tEvts, &tEvts, sizeof(int), hipMemcpyHostToDevice, stream);
//printf("tEvts after gf_unpack_cuda(): %d\n", tEvts);
//returning counter of read words
*start_word = i;
return tEvts;
}
|
c267c7ce01a95d60589601c3d3899c1cfeb2ca9b.cu
|
#include "svt_utils.h"
/* __global__ void gf_unpack(unsigned int start_word, int *d_ids, int *d_out1, int *d_out2, int *d_out3, struct evt_arrays *evt_dev){
int evt = threadIdx.x;
bool gf_xft = 0;
int id_last = -1;
int id = d_ids[start_word];
if (id == XFT_LYR_2) { // compatibility - stp
id = XFT_LYR;
gf_xft = 1;
}
int nroads = evt_dev->evt_nroads[evt];
int nhits = evt_dev->evt_nhits[evt][nroads][id];
// SVX Data <------------------------- DA QUI
if (id < XFT_LYR) {
int zid = d_out1[i];
int lcl = d_out2[i];
int hit = d_out3[i];
evt_dev->evt_hit[evt][nroads][id][nhits] = hit;
evt_dev->evt_hitZ[evt][nroads][id][nhits] = zid;
evt_dev->evt_lcl[evt][nroads][id][nhits] = lcl;
evt_dev->evt_lclforcut[evt][nroads][id][nhits] = lcl;
evt_dev->evt_layerZ[evt][nroads][id] = zid;
if (evt_dev->evt_zid[evt][nroads] == -1) {
evt_dev->evt_zid[evt][nroads] = zid & gf_mask(GF_SUBZ_WIDTH);
} else {
evt_dev->evt_zid[evt][nroads] = (((zid & gf_mask(GF_SUBZ_WIDTH)) << GF_SUBZ_WIDTH)
+ (evt_dev->evt_zid[evt][nroads] & gf_mask(GF_SUBZ_WIDTH)));
}
nhits = ++evt_dev->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) evt_dev->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) evt_dev->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == XFT_LYR && gf_xft == 0) {
// we ignore - stp
} else if (id == XFT_LYR && gf_xft == 1) {
int crv = d_out1[i];
int crv_sign = d_out2[i];
int phi = d_out3[i];
evt_dev->evt_crv[evt][nroads][nhits] = crv;
evt_dev->evt_crv_sign[evt][nroads][nhits] = crv_sign;
evt_dev->evt_phi[evt][nroads][nhits] = phi;
nhits = ++evt_dev->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) evt_dev->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) evt_dev->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == EP_LYR) {
int sector = d_out1[i];
int amroad = d_out2[i];
evt_dev->evt_cable_sect[evt][nroads] = sector;
evt_dev->evt_sect[evt][nroads] = sector;
evt_dev->evt_road[evt][nroads] = amroad;
evt_dev->evt_err_sum[evt] |= evt_dev->evt_err[evt][nroads];
nroads = ++evt_dev->evt_nroads[evt];
for (id = 0; id <= XFT_LYR; id++)
evt_dev->evt_nhits[evt][nroads][id] = 0;
evt_dev->evt_err[evt][nroads] = 0;
evt_dev->evt_zid[evt][nroads] = -1;
id = -1; id_last = -1;
} else if (id == EE_LYR) {
evt_dev->evt_ee_word[evt] = d_out[i];
atomicAdd(&tEvts, 1);
id = -1; id_last = -1;
} else {
evt_dev->evt_err[evt][nroads] |= (1 << INV_DATA_BIT);
}
id_last = id;
}*/
// Unpack the events data and return the number of events read
unsigned int gf_unpack_cuda_GPU(int *ids, int *out1, int *out2, int *out3, int n_words, struct evt_arrays *evt_dev, int* d_tEvts, struct evt_arrays *evta, unsigned int *start_word, cudaStream_t stream, cudaEvent_t event ) {
MY_CUDA_CHECK(cudaStreamSynchronize(stream));
/*MY_CUDA_CHECK(cudaStreamWaitEvent(stream, event, 0));
cudaMemcpyAsync(evt_dev, evta, sizeof(struct evt_arrays), cudaMemcpyHostToDevice, stream);*/
unsigned int tEvts=0;
///////////////// now fill evt (gf_fep_unpack)
memset(evta->evt_nroads, 0, sizeof(evta->evt_nroads));
memset(evta->evt_err_sum, 0, sizeof(evta->evt_err_sum));
memset(evta->evt_layerZ, 0, sizeof(evta->evt_layerZ));
memset(evta->evt_nhits, 0, sizeof(evta->evt_nhits));
memset(evta->evt_err, 0, sizeof(evta->evt_err));
memset(evta->evt_zid, 0, sizeof(evta->evt_zid));
for (int ie = 0; ie < NEVTS; ie++) {
evta->evt_zid[ie][evta->evt_nroads[ie]] = -1; // because we set it to 0 for GPU version
}
int id_last = -1;
int evt = EVT;
int id;
unsigned int i = 0;
if((start_word != NULL) ){
if(*start_word < n_words){
i = *start_word;
}else{
printf("gf_unpack_cuda_GPU ERROR: *start_word is >= than n_words; starting from zero\n");
}
}//start from zero if NULL
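// The word index i is written back to *start_word at the end of this function,
// so a later call can resume unpacking exactly where this one stopped.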
do {
id = ids[i];
bool gf_xft = 0;
if (id == XFT_LYR_2) { // compatibility - stp
id = XFT_LYR;
gf_xft = 1;
}
int nroads = evta->evt_nroads[evt];
int nhits = evta->evt_nhits[evt][nroads][id];
// SVX Data
if (id < XFT_LYR) {
int zid = out1[i];
int lcl = out2[i];
int hit = out3[i];
evta->evt_hit[evt][nroads][id][nhits] = hit;
evta->evt_hitZ[evt][nroads][id][nhits] = zid;
evta->evt_lcl[evt][nroads][id][nhits] = lcl;
evta->evt_lclforcut[evt][nroads][id][nhits] = lcl;
evta->evt_layerZ[evt][nroads][id] = zid;
if (evta->evt_zid[evt][nroads] == -1) {
evta->evt_zid[evt][nroads] = zid & gf_mask(GF_SUBZ_WIDTH);
} else {
evta->evt_zid[evt][nroads] = (((zid & gf_mask(GF_SUBZ_WIDTH)) << GF_SUBZ_WIDTH)
+ (evta->evt_zid[evt][nroads] & gf_mask(GF_SUBZ_WIDTH)));
}
nhits = ++evta->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) evta->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) evta->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == XFT_LYR && gf_xft == 0) {
// we ignore - stp
} else if (id == XFT_LYR && gf_xft == 1) {
int crv = out1[i];
int crv_sign = out2[i];
int phi = out3[i];
evta->evt_crv[evt][nroads][nhits] = crv;
evta->evt_crv_sign[evt][nroads][nhits] = crv_sign;
evta->evt_phi[evt][nroads][nhits] = phi;
nhits = ++evta->evt_nhits[evt][nroads][id];
// Error Checking
if (nhits == MAX_HIT) evta->evt_err[evt][nroads] |= (1 << OFLOW_HIT_BIT);
if (id < id_last) evta->evt_err[evt][nroads] |= (1 << OUTORDER_BIT);
} else if (id == EP_LYR) {
int sector = out1[i];
int amroad = out2[i];
evta->evt_cable_sect[evt][nroads] = sector;
evta->evt_sect[evt][nroads] = sector;
evta->evt_road[evt][nroads] = amroad;
evta->evt_err_sum[evt] |= evta->evt_err[evt][nroads];
nroads = ++evta->evt_nroads[evt];
if (nroads > MAXROAD) {
printf("The limit on the number of roads fitted by the TF is %d\n",MAXROAD);
printf("You reached that limit evt->nroads = %d\n",nroads);
}
for (id = 0; id <= XFT_LYR; id++)
evta->evt_nhits[evt][nroads][id] = 0;
evta->evt_err[evt][nroads] = 0;
evta->evt_zid[evt][nroads] = -1;
id = -1; id_last = -1;
} else if (id == EE_LYR) {
evta->evt_ee_word[evt] = out1[i];
tEvts++;
evt++;
id = -1; id_last = -1;
} else {
printf("Error INV_DATA_BIT: layer = %u\n", id);
evta->evt_err[evt][nroads] |= (1 << INV_DATA_BIT);
}
id_last = id;
//increment words counter
i++;
}while((i < n_words) && (tEvts < NEVTS)); //end loop on input words when tEvts == NEVTS
cudaMemcpyAsync(evt_dev, evta, sizeof(struct evt_arrays), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(d_tEvts, &tEvts, sizeof(int), cudaMemcpyHostToDevice, stream);
//printf("tEvts after gf_unpack_cuda(): %d\n", tEvts);
//returning counter of read words
*start_word = i;
return tEvts;
}
|
1ed36d3f4fb731786e5ddee5a60ae8f44a3c317c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "assert.h"
#include "real.h"
#include <iostream>
#include "gpuerrchk.cuh"
#include "math.h"
#define MAX_MASK_WIDTH 10
__device__ __constant__ float d_M[1000];
__global__ void constant_conv_kernel(real* A, real* P, int mask_width, int width){
int i=blockIdx.x*blockDim.x+threadIdx.x;
if (i >= width) return; // guard: the last block is padded, so threads past width must not write P
real Pvalue=0; //mask width is assumed odd So there are mask_width values in [-mask_width/2, mask_width/2]
for(int j=i-mask_width/2; j<=i+mask_width/2; ++j){
if (j>=0 && j<width)
Pvalue+= A[j]*d_M[j-(i-mask_width/2)];
}
P[i]=Pvalue;
}
void constant_conv(real* A,float* M, real* P, int mask_width, int width ){
real* d_A;
real* d_P;
gpuErrchk(hipMalloc((void**)&d_A, sizeof(real)*width ));
gpuErrchk(hipMemcpy(d_A, A, sizeof(real)*width, hipMemcpyHostToDevice ) );
gpuErrchk(hipMemcpyToSymbol(d_M, M, sizeof(real)*mask_width ) );
gpuErrchk(hipMalloc((void**)&d_P, sizeof(real)*width ));
int blocksize=512;
hipLaunchKernelGGL(( constant_conv_kernel), dim3(ceil(width/ (real)blocksize)),dim3(blocksize) , 0, 0, d_A, d_P, mask_width, width);
gpuErrchk(hipMemcpy(P, d_P, sizeof(real)*width, hipMemcpyDeviceToHost ) );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk(hipFree(d_A ) );
gpuErrchk(hipFree(d_P ) );
}
void trial(){
constexpr int asize=1000;
constexpr int bsize=11;
real A[asize];
for(int i=0; i< asize; i++){
A[i]=1;
}
real M[bsize];
for (int i=0; i<bsize; ++i){
M[i]=i;
}
real P[asize];
constant_conv(A,M,P,bsize,asize);
}
int main(){
trial();
}
|
1ed36d3f4fb731786e5ddee5a60ae8f44a3c317c.cu
|
#include "assert.h"
#include "real.h"
#include <iostream>
#include "gpuerrchk.cuh"
#include "math.h"
#define MAX_MASK_WIDTH 10
__device__ __constant__ float d_M[1000];
__global__ void constant_conv_kernel(real* A, real* P, int mask_width, int width){
int i=blockIdx.x*blockDim.x+threadIdx.x;
if (i >= width) return; // guard: the last block is padded, so threads past width must not write P
real Pvalue=0; //mask width is assumed odd So there are mask_width values in [-mask_width/2, mask_width/2]
for(int j=i-mask_width/2; j<=i+mask_width/2; ++j){
if (j>=0 && j<width)
Pvalue+= A[j]*d_M[j-(i-mask_width/2)];
}
P[i]=Pvalue;
}
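// 1-D convolution with the mask held in constant memory: each thread computes
// P[i] = sum_{j = i - mask_width/2 .. i + mask_width/2} A[j] * M[j - (i - mask_width/2)],
// with out-of-range A[j] treated as zero.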
void constant_conv(real* A,float* M, real* P, int mask_width, int width ){
real* d_A;
real* d_P;
gpuErrchk(cudaMalloc((void**)&d_A, sizeof(real)*width ));
gpuErrchk(cudaMemcpy(d_A, A, sizeof(real)*width, cudaMemcpyHostToDevice ) );
gpuErrchk(cudaMemcpyToSymbol(d_M, M, sizeof(real)*mask_width ) );
gpuErrchk(cudaMalloc((void**)&d_P, sizeof(real)*width ));
int blocksize=512;
constant_conv_kernel<<<ceil(width/ (real)blocksize),blocksize >>>(d_A, d_P, mask_width, width);
gpuErrchk(cudaMemcpy(P, d_P, sizeof(real)*width, cudaMemcpyDeviceToHost ) );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk(cudaFree(d_A ) );
gpuErrchk(cudaFree(d_P ) );
}
void trial(){
constexpr int asize=1000;
constexpr int bsize=11;
real A[asize];
for(int i=0; i< asize; i++){
A[i]=1;
}
real M[bsize];
for (int i=0; i<bsize; ++i){
M[i]=i;
}
real P[asize];
constant_conv(A,M,P,bsize,asize);
}
int main(){
trial();
}
|
c7eea813252e235655d43143e5f5a89c58b9a1f1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arrayfire.h>
#include <hiprand/hiprand_kernel.h>
using namespace af;
#include <stdio.h>
#include <time.h>
typedef unsigned int IntegerType;
// generate millions of random samples
static IntegerType samples = 30e6;
static float g_h_elapsedtime;
static IntegerType g_h_PerThreadLoad;
static IntegerType g_h_BlkCount;
static IntegerType g_h_ThrdCount;
static IntegerType repetitions;
static IntegerType leftOverSize;
const int BLOCKS_PER_SM = 8;
const int DEFAULT_UNWIND_COUNT = 8;
const float g_h_fraction = 0.649161;
IntegerType *g_d_blockCounts;
IntegerType *g_h_output;
static void HandleError( hipError_t err, const char *file, int line )
{
if (err != hipSuccess)
{
printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
/*
Self-contained code to run each implementation of PI estimation.
Note that each is generating its own random values, so the
estimates of PI will differ.
*/
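// Monte-Carlo estimate: pi ~= 4 * (points that fall inside the unit quarter circle) / samples.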
static double pi_cpu()
{
IntegerType count = 0;
for (IntegerType i = 0; i < samples; ++i) {
float x = float(rand()) / RAND_MAX;
float y = float(rand()) / RAND_MAX;
if (x*x + y*y < 1)
count++;
}
return 4.0 * count / samples;
}
static double pi_af()
{
array x = randu(samples,f32), y = randu(samples,f32);
return 4 * sum<float>(x*x + y*y <= 1) / samples;
}
/**
* Below kernel is used for finding rough estimation
* of time taken for one single iteration on the device
* The resulting estimation is used to find per thread work load
*/
__global__ void opTimeEstimation(IntegerType fSamples)
{
for(IntegerType i=0;i<fSamples;++i)
{
IntegerType seed = i;
hiprandState_t s;
hiprand_init(seed, 0, 0, &s);
float x = hiprand_uniform(&s);
float y = hiprand_uniform(&s);
bool value = ( x*x + y*y < 1 ? 1 : 0 );
}
}
static void pi_init_cuda()
{
/*
TODO any initialization code you need goes here, e.g. random
number seeding, hipMalloc allocations, etc. Random number
_generation_ should still go in pi_cuda().
*/
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("-------------------------------------------\n");
printf("Device name: %s\n", prop.name);
printf("Shared memory per block = %d KB\n",prop.sharedMemPerBlock/1024);
printf("Multiprocessor count : %d\n", prop.multiProcessorCount);
printf("Warp size : %d\n", prop.warpSize);
printf("Max blocks per (x,y,z) dimension = (%d,%d,%d)\n",prop.maxGridSize[0],prop.maxGridSize[1],prop.maxGridSize[2]);
printf("Max threads per (x,y,z) dimension = (%d,%d,%d)\n",prop.maxThreadsDim[0],prop.maxThreadsDim[1],prop.maxThreadsDim[2]);
printf("-------------------------------------------\n");
g_h_ThrdCount = prop.maxThreadsPerBlock/2;
/**
* consider the following operations as one single task
* generate two random numbers
* find sum or their squares
* compare if less than < 1
* estimate time for one such task on device by launching a <<<1,1>>> thread
*/
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipLaunchKernelGGL(( opTimeEstimation), dim3(1),dim3(1), 0, 0, samples);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&g_h_elapsedtime,start,stop);
printf ("Time for the kernel: %f ms\n", g_h_elapsedtime);
g_h_PerThreadLoad = g_h_fraction*samples/(g_h_elapsedtime*1.0);
printf("Max possible Per thread work load %d \n",g_h_PerThreadLoad);
g_h_BlkCount = (float)samples/(g_h_ThrdCount*g_h_PerThreadLoad)+0.5f;
g_h_BlkCount = g_h_BlkCount + (prop.multiProcessorCount - (g_h_BlkCount%prop.multiProcessorCount));
g_h_PerThreadLoad = (float)samples/(g_h_BlkCount*g_h_ThrdCount)+0.5f;
samples = g_h_BlkCount*g_h_ThrdCount*g_h_PerThreadLoad;
printf("Number of blocks : %d\n",g_h_BlkCount);
printf("Number of threads per block : %d\n",g_h_ThrdCount);
printf("Per thread load : %d\n",g_h_PerThreadLoad);
printf("Global array size : %d\n",g_h_BlkCount*sizeof(IntegerType));
HANDLE_ERROR( hipMalloc((void**)&g_d_blockCounts, g_h_BlkCount*sizeof(IntegerType)) );
g_h_output = (IntegerType*)malloc(sizeof(IntegerType));
repetitions = g_h_PerThreadLoad/DEFAULT_UNWIND_COUNT;
leftOverSize = g_h_PerThreadLoad%DEFAULT_UNWIND_COUNT;
}
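// Note on the tuning above: the per-thread workload is estimated from timing a single
// <<<1,1>>> run scaled by g_h_fraction, and the block count is then rounded up to a
// multiple of the multiprocessor count so the blocks spread evenly across the SMs.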
__global__ void pointTest(IntegerType* fBlockCounts, IntegerType fNumThreads, IntegerType fPerThreadLoad,
unsigned long fSeed, const IntegerType fRepeats, const IntegerType fLeftOverSize)
{
extern __shared__ volatile IntegerType cache[];
hiprandState_t myState;
IntegerType myId = blockIdx.x*blockDim.x + threadIdx.x;
hiprand_init(fSeed, myId, 0, &myState);
IntegerType count = 0;
// unroll the loop DEFAULT_UNWIND_COUNT times
for(IntegerType unwind_k=0; unwind_k<fRepeats; unwind_k++)
{
float x,y;
/* 8 times */
x = hiprand_uniform(&myState); y = hiprand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = hiprand_uniform(&myState); y = hiprand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = hiprand_uniform(&myState); y = hiprand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = hiprand_uniform(&myState); y = hiprand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = hiprand_uniform(&myState); y = hiprand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = hiprand_uniform(&myState); y = hiprand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = hiprand_uniform(&myState); y = hiprand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = hiprand_uniform(&myState); y = hiprand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
}
// loop rest over elements
for(IntegerType leftOver_k=0; leftOver_k<fLeftOverSize; ++leftOver_k)
{
float x = hiprand_uniform(&myState);
float y = hiprand_uniform(&myState);
if((x*x + y*y) < 1.0) count++;
}
cache[threadIdx.x] = count;
__syncthreads();
// Reduction of this cache.
while( (fNumThreads>>=1)>0 )
{
if(threadIdx.x<fNumThreads)
cache[threadIdx.x] += cache[threadIdx.x+fNumThreads];
__syncthreads();
}
/*if(threadIdx.x<32)
{
cache[threadIdx.x] += cache[threadIdx.x+32];
cache[threadIdx.x] += cache[threadIdx.x+16];
cache[threadIdx.x] += cache[threadIdx.x+8];
cache[threadIdx.x] += cache[threadIdx.x+4];
cache[threadIdx.x] += cache[threadIdx.x+2];
}*/
// End of reduction: thread-id 0 puts in cache[0]
if(threadIdx.x == 0)
fBlockCounts[blockIdx.x] = cache[0];
}
__global__ void sumUpBlkCounts(IntegerType* fBlockCounts, IntegerType fSize)
{
for(IntegerType k=1;k<fSize;++k)
{
fBlockCounts[0] += fBlockCounts[k];
}
}
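/*
 * Editor's sketch (hypothetical alternative, not used by the build): the
 * single-thread kernel above adds the per-block counts serially on the
 * device. Copying the whole array back and summing on the host would be an
 * equivalent final step; the helper name below is invented for illustration.
 */
static IntegerType sumBlockCountsOnHost(const IntegerType* hostCounts, IntegerType n)
{
    IntegerType total = 0;
    for (IntegerType k = 0; k < n; ++k)
        total += hostCounts[k]; // same associative sum, done on the CPU
    return total;
}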
static double pi_cuda()
{
/*
TODO Put your code here. You can use anything in the CUDA
Toolkit, including libraries, Thrust, or your own device
kernels, but do not use ArrayFire functions here. If you have
initialization code, see pi_init_cuda().
*/
hipLaunchKernelGGL(( pointTest), dim3(g_h_BlkCount),dim3(g_h_ThrdCount),g_h_ThrdCount*sizeof(IntegerType), 0, g_d_blockCounts,g_h_ThrdCount,g_h_PerThreadLoad,time(NULL),repetitions,leftOverSize);
HANDLE_ERROR( hipPeekAtLastError() );
hipLaunchKernelGGL(( sumUpBlkCounts), dim3(1),dim3(1), 0, 0, g_d_blockCounts,g_h_BlkCount);
HANDLE_ERROR( hipDeviceSynchronize() );
HANDLE_ERROR( hipMemcpy(g_h_output,g_d_blockCounts,sizeof(IntegerType),hipMemcpyDeviceToHost) );
return 4.0 * g_h_output[0] / samples;
}
// void wrappers for timeit()
static void wrap_cpu() { pi_cpu(); }
static void wrap_af() { pi_af(); }
static void wrap_cuda() { pi_cuda(); }
static void experiment(const char *method, double time, double error, double cpu_time)
{
printf("%10s: %7.5f seconds, error=%.8f", method, time, error);
if (time > cpu_time) printf(" ... needs speed!");
if (error > 1e-3) printf(" ... needs accuracy!");
putchar('\n');
}
int main(int argc, char** argv)
{
try {
// perform timings and calculate error from reference af::Pi
info();
double t_cpu = timeit(wrap_cpu), e_cpu = fabs(af::Pi - pi_cpu());
double t_af = timeit(wrap_af), e_af = fabs(af::Pi - pi_af());
pi_init_cuda();
double t_cuda = timeit(wrap_cuda), e_cuda = fabs(af::Pi - pi_cuda());
hipFree(g_d_blockCounts);
// print results
experiment("cpu", t_cpu, e_cpu, t_cpu);
experiment("arrayfire", t_af, e_af, t_cpu);
experiment("cuda", t_cuda, e_cuda, t_cpu);
} catch (af::exception& e) {
fprintf(stderr, "%s\n", e.what());
throw;
}
#ifdef WIN32 // pause in Windows
if (!(argc == 2 && argv[1][0] == '-')) {
printf("hit [enter]...");
getchar();
}
#endif
return 0;
}
|
c7eea813252e235655d43143e5f5a89c58b9a1f1.cu
|
#include <arrayfire.h>
#include <curand_kernel.h>
using namespace af;
#include <stdio.h>
#include <time.h>
typedef unsigned int IntegerType;
// generate millions of random samples
static IntegerType samples = 30e6;
static float g_h_elapsedtime;
static IntegerType g_h_PerThreadLoad;
static IntegerType g_h_BlkCount;
static IntegerType g_h_ThrdCount;
static IntegerType repetitions;
static IntegerType leftOverSize;
const int BLOCKS_PER_SM = 8;
const int DEFAULT_UNWIND_COUNT = 8;
const float g_h_fraction = 0.649161;
IntegerType *g_d_blockCounts;
IntegerType *g_h_output;
static void HandleError( cudaError_t err, const char *file, int line )
{
if (err != cudaSuccess)
{
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
/*
Self-contained code to run each implementation of PI estimation.
Note that each is generating its own random values, so the
estimates of PI will differ.
*/
static double pi_cpu()
{
IntegerType count = 0;
for (IntegerType i = 0; i < samples; ++i) {
float x = float(rand()) / RAND_MAX;
float y = float(rand()) / RAND_MAX;
if (x*x + y*y < 1)
count++;
}
return 4.0 * count / samples;
}
static double pi_af()
{
array x = randu(samples,f32), y = randu(samples,f32);
return 4 * sum<float>(x*x + y*y <= 1) / samples;
}
/**
* Below kernel is used for finding rough estimation
* of time taken for one single iteration on the device
* The resulting estimation is used to find per thread work load
*/
__global__ void opTimeEstimation(IntegerType fSamples)
{
for(IntegerType i=0;i<fSamples;++i)
{
IntegerType seed = i;
curandState s;
curand_init(seed, 0, 0, &s);
float x = curand_uniform(&s);
float y = curand_uniform(&s);
bool value = ( x*x + y*y < 1 ? 1 : 0 );
}
}
static void pi_init_cuda()
{
/*
TODO any initialization code you need goes here, e.g. random
number seeding, cudaMalloc allocations, etc. Random number
_generation_ should still go in pi_cuda().
*/
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("-------------------------------------------\n");
printf("Device name: %s\n", prop.name);
printf("Shared memory per block = %d KB\n",prop.sharedMemPerBlock/1024);
printf("Multiprocessor count : %d\n", prop.multiProcessorCount);
printf("Warp size : %d\n", prop.warpSize);
printf("Max blocks per (x,y,z) dimension = (%d,%d,%d)\n",prop.maxGridSize[0],prop.maxGridSize[1],prop.maxGridSize[2]);
printf("Max threads per (x,y,z) dimension = (%d,%d,%d)\n",prop.maxThreadsDim[0],prop.maxThreadsDim[1],prop.maxThreadsDim[2]);
printf("-------------------------------------------\n");
g_h_ThrdCount = prop.maxThreadsPerBlock/2;
/**
* consider the following operations as one single task
* generate two random numbers
* find sum of their squares
* compare if less than < 1
* estimate time for one such task on device by launching a <<<1,1>>> thread
*/
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
opTimeEstimation<<<1,1>>>(samples);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&g_h_elapsedtime,start,stop);
printf ("Time for the kernel: %f ms\n", g_h_elapsedtime);
g_h_PerThreadLoad = g_h_fraction*samples/(g_h_elapsedtime*1.0);
printf("Max possible Per thread work load %d \n",g_h_PerThreadLoad);
g_h_BlkCount = (float)samples/(g_h_ThrdCount*g_h_PerThreadLoad)+0.5f;
g_h_BlkCount = g_h_BlkCount + (prop.multiProcessorCount - (g_h_BlkCount%prop.multiProcessorCount));
g_h_PerThreadLoad = (float)samples/(g_h_BlkCount*g_h_ThrdCount)+0.5f;
samples = g_h_BlkCount*g_h_ThrdCount*g_h_PerThreadLoad;
printf("Number of blocks : %d\n",g_h_BlkCount);
printf("Number of threads per block : %d\n",g_h_ThrdCount);
printf("Per thread load : %d\n",g_h_PerThreadLoad);
printf("Global array size : %d\n",g_h_BlkCount*sizeof(IntegerType));
HANDLE_ERROR( cudaMalloc((void**)&g_d_blockCounts, g_h_BlkCount*sizeof(IntegerType)) );
g_h_output = (IntegerType*)malloc(sizeof(IntegerType));
repetitions = g_h_PerThreadLoad/DEFAULT_UNWIND_COUNT;
leftOverSize = g_h_PerThreadLoad%DEFAULT_UNWIND_COUNT;
}
__global__ void pointTest(IntegerType* fBlockCounts, IntegerType fNumThreads, IntegerType fPerThreadLoad,
unsigned long fSeed, const IntegerType fRepeats, const IntegerType fLeftOverSize)
{
extern __shared__ volatile IntegerType cache[];
curandState myState;
IntegerType myId = blockIdx.x*blockDim.x + threadIdx.x;
curand_init(fSeed, myId, 0, &myState);
IntegerType count = 0;
// unroll the loop DEFAULT_UNWIND_COUNT times
for(IntegerType unwind_k=0; unwind_k<fRepeats; unwind_k++)
{
float x,y;
/* 8 times */
x = curand_uniform(&myState); y = curand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = curand_uniform(&myState); y = curand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = curand_uniform(&myState); y = curand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = curand_uniform(&myState); y = curand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = curand_uniform(&myState); y = curand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = curand_uniform(&myState); y = curand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = curand_uniform(&myState); y = curand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
x = curand_uniform(&myState); y = curand_uniform(&myState); if((x*x + y*y) < 1.0) count++;
}
// loop over the remaining elements
for(IntegerType leftOver_k=0; leftOver_k<fLeftOverSize; ++leftOver_k)
{
float x = curand_uniform(&myState);
float y = curand_uniform(&myState);
if((x*x + y*y) < 1.0) count++;
}
cache[threadIdx.x] = count;
__syncthreads();
// Reduction of this cache.
while( (fNumThreads>>=1)>0 )
{
if(threadIdx.x<fNumThreads)
cache[threadIdx.x] += cache[threadIdx.x+fNumThreads];
__syncthreads();
}
/*if(threadIdx.x<32)
{
cache[threadIdx.x] += cache[threadIdx.x+32];
cache[threadIdx.x] += cache[threadIdx.x+16];
cache[threadIdx.x] += cache[threadIdx.x+8];
cache[threadIdx.x] += cache[threadIdx.x+4];
cache[threadIdx.x] += cache[threadIdx.x+2];
}*/
// End of reduction: thread-id 0 puts in cache[0]
if(threadIdx.x == 0)
fBlockCounts[blockIdx.x] = cache[0];
}
__global__ void sumUpBlkCounts(IntegerType* fBlockCounts, IntegerType fSize)
{
for(IntegerType k=1;k<fSize;++k)
{
fBlockCounts[0] += fBlockCounts[k];
}
}
static double pi_cuda()
{
/*
TODO Put your code here. You can use anything in the CUDA
Toolkit, including libraries, Thrust, or your own device
kernels, but do not use ArrayFire functions here. If you have
initialization code, see pi_init_cuda().
*/
pointTest<<<g_h_BlkCount,g_h_ThrdCount,g_h_ThrdCount*sizeof(IntegerType)>>>(g_d_blockCounts,g_h_ThrdCount,g_h_PerThreadLoad,time(NULL),repetitions,leftOverSize);
HANDLE_ERROR( cudaPeekAtLastError() );
sumUpBlkCounts<<<1,1>>>(g_d_blockCounts,g_h_BlkCount);
HANDLE_ERROR( cudaDeviceSynchronize() );
HANDLE_ERROR( cudaMemcpy(g_h_output,g_d_blockCounts,sizeof(IntegerType),cudaMemcpyDeviceToHost) );
return 4.0 * g_h_output[0] / samples;
}
// void wrappers for timeit()
static void wrap_cpu() { pi_cpu(); }
static void wrap_af() { pi_af(); }
static void wrap_cuda() { pi_cuda(); }
static void experiment(const char *method, double time, double error, double cpu_time)
{
printf("%10s: %7.5f seconds, error=%.8f", method, time, error);
if (time > cpu_time) printf(" ... needs speed!");
if (error > 1e-3) printf(" ... needs accuracy!");
putchar('\n');
}
int main(int argc, char** argv)
{
try {
// perform timings and calculate error from reference af::Pi
info();
double t_cpu = timeit(wrap_cpu), e_cpu = fabs(af::Pi - pi_cpu());
double t_af = timeit(wrap_af), e_af = fabs(af::Pi - pi_af());
pi_init_cuda();
double t_cuda = timeit(wrap_cuda), e_cuda = fabs(af::Pi - pi_cuda());
cudaFree(g_d_blockCounts);
// print results
experiment("cpu", t_cpu, e_cpu, t_cpu);
experiment("arrayfire", t_af, e_af, t_cpu);
experiment("cuda", t_cuda, e_cuda, t_cpu);
} catch (af::exception& e) {
fprintf(stderr, "%s\n", e.what());
throw;
}
#ifdef WIN32 // pause in Windows
if (!(argc == 2 && argv[1][0] == '-')) {
printf("hit [enter]...");
getchar();
}
#endif
return 0;
}
|
1d1dc13c3db2e9884efa98356672fa149cef8fa5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_addf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(float));
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
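// Editor's sketch (illustrative, unused): the two while-loops above simply
// round the problem size up to a multiple of the block dimensions, so the
// same grid can be computed directly with a ceiling division.
dim3 gridBlockViaCeil((XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY);
(void)gridBlockViaCeil;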
hipFree(0);
hipLaunchKernelGGL((vec_addf), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((vec_addf), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((vec_addf), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
1d1dc13c3db2e9884efa98356672fa149cef8fa5.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_addf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE*sizeof(float));
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_addf<<<gridBlock,threadBlock>>>(n,result,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_addf<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_addf<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f5792340e8d1567906c6fcc27b6051aaabeab8ef.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Cholesky decomposition.
* Host code.
* Author: Naga Kandasamy
* Date: May 23, 2013
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <winsock2.h>
#include "systime.h"
// includes, kernels
#include "chol_kernel.cu"
#include "utils.h"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
Matrix allocate_matrix_on_gpu(const Matrix M);
Matrix allocate_matrix(int num_rows, int num_columns, int init);
void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost);
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice);
void check_error(const char *msg);
extern Matrix create_positive_definite_matrix(unsigned int, unsigned int);
extern "C" int chol_gold(const Matrix, Matrix);
extern "C" int check_chol(const Matrix, const Matrix);
void chol_on_device(const Matrix, Matrix);
void chol_on_device_optimized(const Matrix, Matrix);
void chol_on_device_cudaUFMG(const Matrix, Matrix);
extern void print_matrix_to_file(const Matrix M, char *filename);
//Globals
double time_cpu;
// Matrices for the program
Matrix A; // The N x N input matrix
Matrix reference; // The upper triangular matrix computed by the CPU
Matrix U_on_device; // The upper triangular matrix computed by the device (slow)
Matrix U_on_device_fast; // The upper triangular matrix computed by the device (fast)
Matrix U_on_device_cudaUFMG;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Check command line arguments
if (argc > 1) {
printf("Error. This program accepts no arguments. \n");
exit(0);
}
// Initialize the random number generator with a seed value
srand(time(NULL));
// Create the positive definite matrix. May require a few tries if we are unlucky
int success = 0;
while (!success) {
A = create_positive_definite_matrix(MATRIX_SIZE, MATRIX_SIZE);
if (A.elements != NULL)
success = 1;
}
reference = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0); // Create a matrix to store the CPU result
U_on_device = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0); // Create a matrix to store the device result
U_on_device_fast = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0);
U_on_device_cudaUFMG = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0);
//Compute the Cholesky decomposition on the CPU
printf("== CPU ==\n");
int status = 1;
start_time();
status = chol_gold(A, reference);
time_cpu = show_time();
if (status == 0) {
printf("Cholesky decomposition failed. The input matrix is not positive definite. \n");
exit(0);
}
#if 0
printf("Double checking for correctness by recovering the original matrix. \n");
if(check_chol(A, reference) == 0){
printf("CPU: FAILED\n");
exit(0);
}
#endif
printf(" PASSED\n"); //IT IS SO PERFECT WE DON'T EVEN CHECK.
//Slow
//Perform the Cholesky decomposition on the GPU. The resulting upper triangular matrix should be returned in U_on_gpu
// chol_on_device(A, U_on_device);
//return 1;
//Optimized
//Perform the Cholesky decomposition on the GPU. The resulting upper triangular matrix should be returned in U_on_gpu
// chol_on_device_optimized(A, U_on_device_fast);
//Optimized for project at UFMG
chol_on_device_cudaUFMG(A, U_on_device_cudaUFMG);
// Free host matrices
free(A.elements);
free(U_on_device.elements);
free(U_on_device_fast.elements);
free(reference.elements);
return 1;
}
//Error helper
void check_for_error(const char *msg) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
printf("CUDA ERROR: %s (%s). \n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
unsigned compareArrays(float *reference, float * device, int size){
//printf("\nSize= %d", size);
for(int i=0; i<size; i++) {
float epsilon = 0.1;
int x = i / MATRIX_SIZE;
int y = i % MATRIX_SIZE;
if(x==y){
epsilon = 1;
}
if(i<100){
//printf("\nreference=%f \ndevice=%f \nepsilon=%f" , reference[i], device[i], epsilon);
}
if (fabs(reference[i] - device[i]) > epsilon) {
printf("\ni=%d : reference=%f != device=%f | x=%d y=%d \n" , i, reference[i], device[i], x, y);
return 0;
}
}
return 1;
}
/* Write code to perform Cholesky decomposition on the device. */
void chol_on_device(const Matrix A, Matrix U) {
//Slow
//Perform the Cholesky decomposition on the GPU. The resulting upper triangular matrix should be returned in U_on_gpu
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//Maximum size expected is 8192x8192
//Will be splitting the elimination i loop
//Which has up to MATRIX_SIZE iterations
//So we would optimally use 8192 threads
//Thus requiring 16 blocks
//Rather than attempting to synchronize 16 blocks
//Where each thread does one operation per outer K iteration
//Just have one block and have each thread do 16 operations
//(in the worst case)
int num_blocks = 1;
//Max per block threads
int threads_per_block = 512;
//Operations per thread
int ops_per_thread = MATRIX_SIZE / (threads_per_block * num_blocks);
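// Editor's note (hypothetical sanity check, not in the original): the integer
// division above only covers every row when it divides evenly, e.g. with
// MATRIX_SIZE == 8192 it gives 8192 / (512 * 1) = 16 operations per thread.
if (MATRIX_SIZE % (threads_per_block * num_blocks) != 0)
    printf(" Warning: MATRIX_SIZE is not a multiple of the total thread count; trailing rows would be skipped.\n");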
printf("== GPU (Slow) ==\n");
printf(" Threads per block: %d\n", threads_per_block);
printf(" Number of blocks: %d\n", num_blocks);
printf(" Operations per thread: %d\n", ops_per_thread);
hipEventRecord(start, 0);
//A and U are already allocated on CPU already
//Allocate space on gpu
Matrix gpu_u = allocate_matrix_on_gpu(U);
//Copy matrices to gpu, copy A right into U
copy_matrix_to_device(gpu_u, A);
//Set up the execution grid on the GPU
dim3 thread_block(threads_per_block, 1, 1);
dim3 grid(num_blocks, 1);
// Launch the kernel <<<grid, thread_block>>>
chol_kernel << <grid, thread_block>>>(gpu_u.elements, ops_per_thread);
//Sync at end and check for errors
hipDeviceSynchronize();
check_for_error("SLOW KERNEL FAILURE\n");
float time_gpu;
//Copy data back
copy_matrix_from_device(U, gpu_u);
//Stop timer before copy back
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time_gpu, start, stop);
//Free memory on device
hipFree(gpu_u.elements);
printf(" Run time: %0.10f ms. \n", time_gpu);
printf(" Speedup: %0.10f\n", time_cpu / time_gpu);
//Check if the device result is equivalent to the expected solution. If you can't meet the desired tolerance, try using double precision support.
unsigned int size = reference.num_rows * reference.num_columns;
unsigned res = compareArrays(reference.elements, U.elements, size);
printf(" %s\n", (1 == res) ? "PASSED" : "FAILED");
}
/* Write code to perform Cholesky decomposition on the device. */
void chol_on_device_optimized(const Matrix A, Matrix U) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("== GPU (Fast) ==\n");
//A and U are already allocated on CPU already
//Each thread within a block will take some j iterations
int threads_per_block = 256; //Optimal
//Stride size should equal threads per block - just cause?
int stride = threads_per_block;
printf(" Threads per block / stride: %d\n", threads_per_block);
//Start timer BEFORE copy
hipEventRecord(start, 0);
//Allocate space on gpu for U
Matrix gpu_u = allocate_matrix_on_gpu(U);
//Copy matrices to gpu, copy A right into U
copy_matrix_to_device(gpu_u, A);
//Each kernel call will be one iteration of out K loop
int k;
for (k = 0; k < MATRIX_SIZE; k++) {
//Want threads to stride across memory
//i is outer loop
//j is inner loop
//so threads should split the j loop
//Each thread block will take an i iteration
int isize = (MATRIX_SIZE - 1) - (k + 1) + 1;
int num_blocks = isize;
if (num_blocks <= 0) {
num_blocks = 1;
}
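// Editor's note: isize simplifies to MATRIX_SIZE - k - 1, the number of rows
// below the current pivot; the guard above keeps one block alive on the last
// iteration (k == MATRIX_SIZE - 1), where no elimination rows remain.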
//Set up the execution grid on the GPU
//printf(" Threads per block: %d\n",threads_per_block);
//printf(" Number of blocks: %d\n",num_blocks);
dim3 thread_block(threads_per_block, 1, 1);
dim3 grid(num_blocks, 1);
//Call the div kernel for this k iteration
chol_kernel_optimized_div << <grid, thread_block>>>(
gpu_u.elements,
k,
stride);
//Call kernel with for this K iteration
chol_kernel_optimized << <grid, thread_block>>>(gpu_u.elements,k,stride);
//Sync at end and check for errors
hipDeviceSynchronize();
check_for_error("FAST KERNEL FAILURE");
}
//Sync at end
hipDeviceSynchronize();
//Copy data back
copy_matrix_from_device(U, gpu_u);
//Stop timer after copy back
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float time_gpu_fast;
hipEventElapsedTime(&time_gpu_fast, start, stop);
//Free memory on device
hipFree(gpu_u.elements);
//As the final step, zero out the lower triangular portion of U
int i, j;
for (i = 0; i < MATRIX_SIZE; i++)
for (j = 0; j < i; j++)
U.elements[i * MATRIX_SIZE + j] = 0.0;
printf(" Run time: %0.10f ms. \n", time_gpu_fast / 1000);
printf(" Speedup: %0.10f\n", time_cpu / (time_gpu_fast / 1000));
//Check if the device result is equivalent to the expected solution. If you can't meet the desired tolerance, try using double precision support.
unsigned int size_fast = reference.num_rows * reference.num_columns;
unsigned res = compareArrays(reference.elements, U_on_device_fast.elements, size_fast);
printf(" %s\n", (1 == res) ? "PASSED" : "FAILED");
//print_matrix_to_file(U,"debug_chol_optimized\\matrix-GPU-div.txt");
//print_matrix_to_file(U,"debug_chol_optimized\\matrix-GPU-final.txt");
//print_matrix_to_file(reference,"debug_chol_optimized\\matrix-CPU-div.txt");
// CUTBoolean res_fast = cutComparefe(reference.elements, U_on_device_fast.elements, size_fast, 0.1f);
// printf(" %s\n", (1 == res_fast) ? "PASSED" : "FAILED");
}
/* Optimized for UFMG CUDA course project */
void chol_on_device_cudaUFMG(const Matrix A, Matrix U) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("== GPU (UFMG) ==\n");
/*
* Shared memory per block: 48k = 49152 bytes
* Total bytes for matrix = MATRIX_SIZE x MATRIX_SIZE x sizeof(float)
* 1 element = 1 scalar of a matrix
* Limited by shared memory, a maximum of 49152 / sizeof(float) elements can be copied to shared memory on each iteration.
* Max elements for thread = 12k elements
*/
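// Editor's sketch of the arithmetic above (illustrative, unused): 48 KB of
// shared memory holds 49152 / sizeof(float) = 12288 floats per block.
const int sharedBytesPerBlockSketch = 49152; // assumption: 48 KB per block, as stated above
const int maxFloatsInSharedSketch = sharedBytesPerBlockSketch / (int)sizeof(float); // = 12288
(void)maxFloatsInSharedSketch;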
//Allocate space on gpu for U
Matrix gpu_u = allocate_matrix_on_gpu(U);
//Start timer BEFORE copy
hipEventRecord(start, 0);
//Copy matrices to gpu, copy A right into U
copy_matrix_to_device(gpu_u, A);
int threads_per_block_sqrt = 512;
int blocks_sqrt = MATRIX_SIZE / threads_per_block_sqrt;
dim3 thread_block(threads_per_block_sqrt, 1, 1);
dim3 grid(blocks_sqrt, 1);
hipLaunchKernelGGL(( chol_kernel_cudaUFMG_sqrt) , dim3(grid), dim3(thread_block), 0, 0, gpu_u.elements);
int block_x_div = 16;
int block_y_div = 16;
int thread_x_div = 4;
int thread_y_div = 4;
dim3 grid_div(block_x_div, block_y_div, 1);
dim3 thread_block_div(thread_x_div, thread_y_div, 1);
int elements_per_thread_div = ((MATRIX_SIZE * MATRIX_SIZE) / 2) / (thread_x_div * thread_y_div * block_x_div * block_y_div);
hipLaunchKernelGGL(( chol_kernel_cudaUFMG_division) , dim3(grid_div), dim3(thread_block_div) , 0, 0, gpu_u.elements, elements_per_thread_div);
#if 1
int block_y_eli = 1;
//Each thread within a block will take some j iterations
int thread_x_eli = 256;
int thread_y_eli = 1;
//hipStream_t stream1;
//hipStreamCreate(&stream1);
//Each kernel call will be one iteration of out K loop
for (int k = 0; k < MATRIX_SIZE; k++) {
//Want threads to stride across memory
//i is outer loop
//j is inner loop
//so threads should split the j loop
//Each thread block will take an i iteration
// i=k+1;i<MATRIX_SIZE
int isize = MATRIX_SIZE - (k + 1);
if(isize==0){
isize++;
}
int block_x_eli = isize;
//Set up the execution grid on the GPU
//printf(" Threads per block: %d\n",threads_per_block);
//printf(" Number of blocks: %d\n",num_blocks);
dim3 thread_block(thread_x_eli, 1, 1);
dim3 grid(block_x_eli, 1);
//Call kernel with for this K iteration
hipLaunchKernelGGL(( chol_kernel_cudaUFMG_elimination) , dim3(grid), dim3(thread_block) /*, 0, stream1*/, 0, 0, gpu_u.elements, k);
//Sync at end and check for errors
//hipDeviceSynchronize();
//check_for_error("FAST KERNEL FAILURE");
}
//hipStreamSynchronize (stream1);
//Sync at end
//hipDeviceSynchronize();
#endif
hipLaunchKernelGGL(( chol_kernel_cudaUFMG_zero) , dim3(grid_div), dim3(thread_block_div), 0, 0, gpu_u.elements, elements_per_thread_div);
/*---------------------------------------------*/
//Copy data back
copy_matrix_from_device(U, gpu_u);
//CUDA_SAFE_CALL(hipPeekAtLastError());
//return;
//Stop timer after copy back
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float time_gpu_fast;
CUDA_SAFE_CALL(hipEventElapsedTime(&time_gpu_fast, start, stop));
//Free memory on device
hipFree(gpu_u.elements);
//Set up the execution grid on the GPU
printf("Threads per block sqrt: %d\n", threads_per_block_sqrt);
printf("Number of blocks sqrt: %d\n", blocks_sqrt);
printf("Elements_per_thread div: %d\n", elements_per_thread_div);
printf(" Run time: %0.10f s. \n", time_gpu_fast / 1000);
printf(" Speedup: %0.10f\n", time_cpu / (time_gpu_fast / 1000) ) ;
//Check if the device result is equivalent to the expected solution. If you can't meet the desired tolerance, try using double precision support.
unsigned int size_fast = reference.num_rows * reference.num_columns;
//print_matrix_to_file(U,"matrix-CUDA.txt");
//print_matrix_to_file(reference,"matrix-CPU.txt");
unsigned res = compareArrays(reference.elements, U.elements, size_fast);
if(res==1){
printf("PASSED: GPU = CPU");
}
else{
printf("FAILED: GPU != CPU");
}
#if 0
//Each thread within a block will take some j iterations
int threads_per_block = 256; //Optimal
//Stride size should equal threads per block - just cause?
int stride = threads_per_block;
printf(" Threads per block / stride: %d\n", threads_per_block);
const int shared_memory_size = 49152;
// Limited by shared memory.
int element_per_shared = shared_memory_size / sizeof(float);
int elements_per_thread = (element_per_shared / max_threads_per_block) -1;
//Each kernel call will be one iteration of out K loop
int k;
for (k = 0; k < MATRIX_SIZE; k++) {
//Want threads to stride across memory
//i is outer loop
//j is inner loop
//so threads should split the j loop
//Each thread block will take an i iteration
int isize = (MATRIX_SIZE - 1) - (k + 1) + 1;
int num_blocks = isize;
if (num_blocks <= 0) {
num_blocks = 1;
}
//Call kernel with for this K iteration
chol_kernel_optimized << <grid, thread_block>>>(
gpu_u.elements,
k,
stride);
//Sync at end and check for errors
hipDeviceSynchronize();
check_for_error("FAST KERNEL FAILURE");
}
//Sync at end
hipDeviceSynchronize();
//Copy data back
copy_matrix_from_device(U, gpu_u);
//As the final step, zero out the lower triangular portion of U
int i, j;
for (i = 0; i < MATRIX_SIZE; i++)
for (j = 0; j < i; j++)
U.elements[i * MATRIX_SIZE + j] = 0.0;
// CUTBoolean res_fast = cutComparefe(reference.elements, U_on_device_fast.elements, size_fast, 0.1f);
// printf(" %s\n", (1 == res_fast) ? "PASSED" : "FAILED");
#endif
}
// Allocate a device matrix of same size as M.
Matrix allocate_matrix_on_gpu(const Matrix M) {
Matrix Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof (float);
hipMalloc((void**) &Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix allocate_matrix(int num_rows, int num_columns, int init) {
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float *) malloc(size * sizeof (float));
for (unsigned int i = 0; i < size; i++) {
if (init == 0) M.elements[i] = 0;
else
M.elements[i] = (float) rand() / (float) RAND_MAX;
}
return M;
}
// Copy a host matrix to a device matrix.
void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost) {
int size = Mhost.num_rows * Mhost.num_columns * sizeof (float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice) {
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof (float);
CUDA_SAFE_CALL(hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost));
}
void check_error(const char *msg) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
printf("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
f5792340e8d1567906c6fcc27b6051aaabeab8ef.cu
|
/* Cholesky decomposition.
* Host code.
* Author: Naga Kandasamy
* Date: May 23, 2013
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <winsock2.h>
#include "systime.h"
// includes, kernels
#include "chol_kernel.cu"
#include "utils.h"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
Matrix allocate_matrix_on_gpu(const Matrix M);
Matrix allocate_matrix(int num_rows, int num_columns, int init);
void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost);
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice);
void check_error(const char *msg);
extern Matrix create_positive_definite_matrix(unsigned int, unsigned int);
extern "C" int chol_gold(const Matrix, Matrix);
extern "C" int check_chol(const Matrix, const Matrix);
void chol_on_device(const Matrix, Matrix);
void chol_on_device_optimized(const Matrix, Matrix);
void chol_on_device_cudaUFMG(const Matrix, Matrix);
extern void print_matrix_to_file(const Matrix M, char *filename);
//Globals
double time_cpu;
// Matrices for the program
Matrix A; // The N x N input matrix
Matrix reference; // The upper triangular matrix computed by the CPU
Matrix U_on_device; // The upper triangular matrix computed by the device (slow)
Matrix U_on_device_fast; // The upper triangular matrix computed by the device (fast)
Matrix U_on_device_cudaUFMG;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Check command line arguments
if (argc > 1) {
printf("Error. This program accepts no arguments. \n");
exit(0);
}
// Initialize the random number generator with a seed value
srand(time(NULL));
// Create the positive definite matrix. May require a few tries if we are unlucky
int success = 0;
while (!success) {
A = create_positive_definite_matrix(MATRIX_SIZE, MATRIX_SIZE);
if (A.elements != NULL)
success = 1;
}
reference = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0); // Create a matrix to store the CPU result
U_on_device = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0); // Create a matrix to store the device result
U_on_device_fast = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0);
U_on_device_cudaUFMG = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0);
//Compute the Cholesky decomposition on the CPU
printf("== CPU ==\n");
int status = 1;
start_time();
status = chol_gold(A, reference);
time_cpu = show_time();
if (status == 0) {
printf("Cholesky decomposition failed. The input matrix is not positive definite. \n");
exit(0);
}
#if 0
printf("Double checking for correctness by recovering the original matrix. \n");
if(check_chol(A, reference) == 0){
printf("CPU: FAILED\n");
exit(0);
}
#endif
printf(" PASSED\n"); //IT IS SO PERFECT WE DON'T EVEN CHECK.
//Slow
//Perform the Cholesky decomposition on the GPU. The resulting upper triangular matrix should be returned in U_on_gpu
// chol_on_device(A, U_on_device);
//return 1;
//Optimized
//Perform the Cholesky decomposition on the GPU. The resulting upper triangular matrix should be returned in U_on_gpu
// chol_on_device_optimized(A, U_on_device_fast);
//Optimized for project at UFMG
chol_on_device_cudaUFMG(A, U_on_device_cudaUFMG);
// Free host matrices
free(A.elements);
free(U_on_device.elements);
free(U_on_device_fast.elements);
free(reference.elements);
return 1;
}
//Error helper
void check_for_error(const char *msg) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
printf("CUDA ERROR: %s (%s). \n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
unsigned compareArrays(float *reference, float * device, int size){
//printf("\nSize= %d", size);
for(int i=0; i<size; i++) {
float epsilon = 0.1;
int x = i / MATRIX_SIZE;
int y = i % MATRIX_SIZE;
if(x==y){
epsilon = 1;
}
if(i<100){
//printf("\nreference=%f \ndevice=%f \nepsilon=%f" , reference[i], device[i], epsilon);
}
if (fabs(reference[i] - device[i]) > epsilon) {
printf("\ni=%d : reference=%f != device=%f | x=%d y=%d \n" , i, reference[i], device[i], x, y);
return 0;
}
}
return 1;
}
/* Write code to perform Cholesky decomposition on the device. */
void chol_on_device(const Matrix A, Matrix U) {
//Slow
//Perform the Cholesky decomposition on the GPU. The resulting upper triangular matrix should be returned in U_on_gpu
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//Maximum size expected is 8192x8192
//Will be splitting the elimination i loop
//Which has up to MATRIX_SIZE iterations
//So we would optimally use 8192 threads
//Thus requiring 16 blocks
//Rather than attempting to synchronize 16 blocks
//Where each thread does one operation per outer K iteration
//Just have one block and have each thread do 16 operations
//(in the worst case)
int num_blocks = 1;
//Max per block threads
int threads_per_block = 512;
//Operations per thread
int ops_per_thread = MATRIX_SIZE / (threads_per_block * num_blocks);
printf("== GPU (Slow) ==\n");
printf(" Threads per block: %d\n", threads_per_block);
printf(" Number of blocks: %d\n", num_blocks);
printf(" Operations per thread: %d\n", ops_per_thread);
cudaEventRecord(start, 0);
//A and U are already allocated on CPU already
//Allocate space on gpu
Matrix gpu_u = allocate_matrix_on_gpu(U);
//Copy matrices to gpu, copy A right into U
copy_matrix_to_device(gpu_u, A);
//Set up the execution grid on the GPU
dim3 thread_block(threads_per_block, 1, 1);
dim3 grid(num_blocks, 1);
// Launch the kernel <<<grid, thread_block>>>
chol_kernel << <grid, thread_block>>>(gpu_u.elements, ops_per_thread);
//Sync at end and check for errors
cudaThreadSynchronize();
check_for_error("SLOW KERNEL FAILURE\n");
float time_gpu;
//Copy data back
copy_matrix_from_device(U, gpu_u);
//Stop timer before copy back
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time_gpu, start, stop);
//Free memory on device
cudaFree(gpu_u.elements);
printf(" Run time: %0.10f ms. \n", time_gpu);
printf(" Speedup: %0.10f\n", time_cpu / time_gpu);
//Check if the device result is equivalent to the expected solution. If you can't meet the desired tolerance, try using double precision support.
unsigned int size = reference.num_rows * reference.num_columns;
unsigned res = compareArrays(reference.elements, U.elements, size);
printf(" %s\n", (1 == res) ? "PASSED" : "FAILED");
}
/* Write code to perform Cholesky decomposition on the device. */
void chol_on_device_optimized(const Matrix A, Matrix U) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("== GPU (Fast) ==\n");
//A and U are already allocated on CPU already
//Each thread within a block will take some j iterations
int threads_per_block = 256; //Optimal
//Stride size should equal threads per block - just cause?
int stride = threads_per_block;
printf(" Threads per block / stride: %d\n", threads_per_block);
//Start timer BEFORE copy
cudaEventRecord(start, 0);
//Allocate space on gpu for U
Matrix gpu_u = allocate_matrix_on_gpu(U);
//Copy matrices to gpu, copy A right into U
copy_matrix_to_device(gpu_u, A);
//Each kernel call will be one iteration of out K loop
int k;
for (k = 0; k < MATRIX_SIZE; k++) {
//Want threads to stride across memory
//i is outer loop
//j is inner loop
//so threads should split the j loop
//Each thread block will take an i iteration
int isize = (MATRIX_SIZE - 1) - (k + 1) + 1;
int num_blocks = isize;
if (num_blocks <= 0) {
num_blocks = 1;
}
//Set up the execution grid on the GPU
//printf(" Threads per block: %d\n",threads_per_block);
//printf(" Number of blocks: %d\n",num_blocks);
dim3 thread_block(threads_per_block, 1, 1);
dim3 grid(num_blocks, 1);
//Call the div kernel for this k iteration
chol_kernel_optimized_div << <grid, thread_block>>>(
gpu_u.elements,
k,
stride);
//Call kernel with for this K iteration
chol_kernel_optimized << <grid, thread_block>>>(gpu_u.elements,k,stride);
//Sync at end and check for errors
cudaThreadSynchronize();
check_for_error("FAST KERNEL FAILURE");
}
//Sync at end
cudaThreadSynchronize();
//Copy data back
copy_matrix_from_device(U, gpu_u);
//Stop timer after copy back
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float time_gpu_fast;
cudaEventElapsedTime(&time_gpu_fast, start, stop);
//Free memory on device
cudaFree(gpu_u.elements);
//As the final step, zero out the lower triangular portion of U
int i, j;
for (i = 0; i < MATRIX_SIZE; i++)
for (j = 0; j < i; j++)
U.elements[i * MATRIX_SIZE + j] = 0.0;
printf(" Run time: %0.10f ms. \n", time_gpu_fast / 1000);
printf(" Speedup: %0.10f\n", time_cpu / (time_gpu_fast / 1000));
//Check if the device result is equivalent to the expected solution. If you can't meet the desired tolerance, try using double precision support.
unsigned int size_fast = reference.num_rows * reference.num_columns;
unsigned res = compareArrays(reference.elements, U_on_device_fast.elements, size_fast);
printf(" %s\n", (1 == res) ? "PASSED" : "FAILED");
//print_matrix_to_file(U,"debug_chol_optimized\\matrix-GPU-div.txt");
//print_matrix_to_file(U,"debug_chol_optimized\\matrix-GPU-final.txt");
//print_matrix_to_file(reference,"debug_chol_optimized\\matrix-CPU-div.txt");
// CUTBoolean res_fast = cutComparefe(reference.elements, U_on_device_fast.elements, size_fast, 0.1f);
// printf(" %s\n", (1 == res_fast) ? "PASSED" : "FAILED");
}
/* Optimized for UFMG CUDA course project */
void chol_on_device_cudaUFMG(const Matrix A, Matrix U) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("== GPU (UFMG) ==\n");
/*
* Shared memory per block: 48k = 49152 bytes
* Total bytes for matrix = MATRIX_SIZE x MATRIX_SIZE x sizeof(float)
* 1 element = 1 scalar of a matrix
* Limited by shared memory, a maximum of 49152 / sizeof(float) elements can be copied to shared memory on each iteration.
* Max elements for thread = 12k elements
*/
//Allocate space on gpu for U
Matrix gpu_u = allocate_matrix_on_gpu(U);
//Start timer BEFORE copy
cudaEventRecord(start, 0);
//Copy matrices to gpu, copy A right into U
copy_matrix_to_device(gpu_u, A);
int threads_per_block_sqrt = 512;
int blocks_sqrt = MATRIX_SIZE / threads_per_block_sqrt;
dim3 thread_block(threads_per_block_sqrt, 1, 1);
dim3 grid(blocks_sqrt, 1);
chol_kernel_cudaUFMG_sqrt <<<grid, thread_block>>>(gpu_u.elements);
int block_x_div = 16;
int block_y_div = 16;
int thread_x_div = 4;
int thread_y_div = 4;
dim3 grid_div(block_x_div, block_y_div, 1);
dim3 thread_block_div(thread_x_div, thread_y_div, 1);
int elements_per_thread_div = ((MATRIX_SIZE * MATRIX_SIZE) / 2) / (thread_x_div * thread_y_div * block_x_div * block_y_div);
chol_kernel_cudaUFMG_division <<<grid_div, thread_block_div >>>(gpu_u.elements, elements_per_thread_div);
#if 1
int block_y_eli = 1;
//Each thread within a block will take some j iterations
int thread_x_eli = 256;
int thread_y_eli = 1;
//cudaStream_t stream1;
//cudaStreamCreate(&stream1);
//Each kernel call will be one iteration of out K loop
for (int k = 0; k < MATRIX_SIZE; k++) {
//Want threads to stride across memory
//i is outer loop
//j is inner loop
//so threads should split the j loop
//Each thread block will take an i iteration
// i=k+1;i<MATRIX_SIZE
int isize = MATRIX_SIZE - (k + 1);
if(isize==0){
isize++;
}
int block_x_eli = isize;
//Set up the execution grid on the GPU
//printf(" Threads per block: %d\n",threads_per_block);
//printf(" Number of blocks: %d\n",num_blocks);
dim3 thread_block(thread_x_eli, 1, 1);
dim3 grid(block_x_eli, 1);
//Call kernel with for this K iteration
chol_kernel_cudaUFMG_elimination <<<grid, thread_block/*, 0, stream1*/>>>(gpu_u.elements, k);
//Sync at end and check for errors
//cudaThreadSynchronize();
//check_for_error("FAST KERNEL FAILURE");
}
//cudaStreamSynchronize (stream1);
//Sync at end
//cudaThreadSynchronize();
#endif
chol_kernel_cudaUFMG_zero <<<grid_div, thread_block_div>>>(gpu_u.elements, elements_per_thread_div);
/*---------------------------------------------*/
//Copy data back
copy_matrix_from_device(U, gpu_u);
//CUDA_SAFE_CALL(cudaPeekAtLastError());
//return;
//Stop timer after copy back
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float time_gpu_fast;
CUDA_SAFE_CALL(cudaEventElapsedTime(&time_gpu_fast, start, stop));
//Free memory on device
cudaFree(gpu_u.elements);
//Set up the execution grid on the GPU
printf("Threads per block sqrt: %d\n", threads_per_block_sqrt);
printf("Number of blocks sqrt: %d\n", blocks_sqrt);
printf("Elements_per_thread div: %d\n", elements_per_thread_div);
printf(" Run time: %0.10f s. \n", time_gpu_fast / 1000);
printf(" Speedup: %0.10f\n", time_cpu / (time_gpu_fast / 1000) ) ;
//Check if the device result is equivalent to the expected solution. If you can't meet the desired tolerance, try using double precision support.
unsigned int size_fast = reference.num_rows * reference.num_columns;
//print_matrix_to_file(U,"matrix-CUDA.txt");
//print_matrix_to_file(reference,"matrix-CPU.txt");
unsigned res = compareArrays(reference.elements, U.elements, size_fast);
if(res==1){
printf("PASSED: GPU = CPU");
}
else{
printf("FAILED: GPU != CPU");
}
#if 0
//Each thread within a block will take some j iterations
int threads_per_block = 256; //Optimal
//Stride size should equal threads per block - just cause?
int stride = threads_per_block;
printf(" Threads per block / stride: %d\n", threads_per_block);
const int shared_memory_size = 49152;
// Limited by shared memory.
int element_per_shared = shared_memory_size / sizeof(float);
int elements_per_thread = (element_per_shared / max_threads_per_block) -1;
//Each kernel call will be one iteration of out K loop
int k;
for (k = 0; k < MATRIX_SIZE; k++) {
//Want threads to stride across memory
//i is outer loop
//j is inner loop
//so threads should split the j loop
//Each thread block will take an i iteration
int isize = (MATRIX_SIZE - 1) - (k + 1) + 1;
int num_blocks = isize;
if (num_blocks <= 0) {
num_blocks = 1;
}
//Call kernel with for this K iteration
chol_kernel_optimized << <grid, thread_block>>>(
gpu_u.elements,
k,
stride);
//Sync at end and check for errors
cudaThreadSynchronize();
check_for_error("FAST KERNEL FAILURE");
}
//Sync at end
cudaThreadSynchronize();
//Copy data back
copy_matrix_from_device(U, gpu_u);
//As the final step, zero out the lower triangular portion of U
int i, j;
for (i = 0; i < MATRIX_SIZE; i++)
for (j = 0; j < i; j++)
U.elements[i * MATRIX_SIZE + j] = 0.0;
// CUTBoolean res_fast = cutComparefe(reference.elements, U_on_device_fast.elements, size_fast, 0.1f);
// printf(" %s\n", (1 == res_fast) ? "PASSED" : "FAILED");
#endif
}
// Allocate a device matrix of same size as M.
Matrix allocate_matrix_on_gpu(const Matrix M) {
Matrix Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof (float);
cudaMalloc((void**) &Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix allocate_matrix(int num_rows, int num_columns, int init) {
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float *) malloc(size * sizeof (float));
for (unsigned int i = 0; i < size; i++) {
if (init == 0) M.elements[i] = 0;
else
M.elements[i] = (float) rand() / (float) RAND_MAX;
}
return M;
}
// Copy a host matrix to a device matrix.
void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost) {
int size = Mhost.num_rows * Mhost.num_columns * sizeof (float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice) {
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof (float);
CUDA_SAFE_CALL(cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost));
}
void check_error(const char *msg) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
3ed2745af2812ccb016def6e1547e138aab7a2f2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernelFindMax2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int *dataArray = NULL;
hipMalloc(&dataArray, XSIZE*YSIZE*sizeof(int));
int arraySize = XSIZE*YSIZE;
int *maxVal = NULL;
hipMalloc(&maxVal, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kernelFindMax2), dim3(gridBlock), dim3(threadBlock), 0, 0, dataArray, arraySize, maxVal);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kernelFindMax2), dim3(gridBlock), dim3(threadBlock), 0, 0, dataArray, arraySize, maxVal);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kernelFindMax2), dim3(gridBlock), dim3(threadBlock), 0, 0, dataArray, arraySize, maxVal);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
3ed2745af2812ccb016def6e1547e138aab7a2f2.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernelFindMax2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int *dataArray = NULL;
cudaMalloc(&dataArray, XSIZE*YSIZE*sizeof(int));
int arraySize = XSIZE*YSIZE;
int *maxVal = NULL;
cudaMalloc(&maxVal, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernelFindMax2<<<gridBlock,threadBlock>>>(dataArray,arraySize,maxVal);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernelFindMax2<<<gridBlock,threadBlock>>>(dataArray,arraySize,maxVal);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernelFindMax2<<<gridBlock,threadBlock>>>(dataArray,arraySize,maxVal);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
778f42c699fe3692dd8efaab0f4fc410efc7eb53.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/warp_shuffle.hpp"
namespace cv { namespace gpu { namespace cudev
{
// Other values are not supported
#define CELL_WIDTH 8
#define CELL_HEIGHT 8
#define CELLS_PER_BLOCK_X 2
#define CELLS_PER_BLOCK_Y 2
namespace hog
{
__constant__ int cnbins;
__constant__ int cblock_stride_x;
__constant__ int cblock_stride_y;
__constant__ int cnblocks_win_x;
__constant__ int cnblocks_win_y;
__constant__ int cblock_hist_size;
__constant__ int cblock_hist_size_2up;
__constant__ int cdescr_size;
__constant__ int cdescr_width;
/* Returns the nearest upper power of two, works only for
the typical GPU thread count (per block) values */
int power_2up(unsigned int n)
{
if (n < 1) return 1;
else if (n < 2) return 2;
else if (n < 4) return 4;
else if (n < 8) return 8;
else if (n < 16) return 16;
else if (n < 32) return 32;
else if (n < 64) return 64;
else if (n < 128) return 128;
else if (n < 256) return 256;
else if (n < 512) return 512;
else if (n < 1024) return 1024;
return -1; // Input is too big
}
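/* Editor's sketch (equivalent loop form, not part of OpenCV): the lookup
   chain above returns the smallest power of two strictly greater than n,
   capped at 1024. A generic version with the same behaviour, unused here: */
static int power_2up_sketch(unsigned int n)
{
    unsigned int p = 1;
    while (p <= n && p <= 1024)
        p <<= 1; // double until we pass n
    return (p <= 1024) ? (int)p : -1; // mirror the "-1 means input too big" convention
}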
void set_up_constants(int nbins, int block_stride_x, int block_stride_y,
int nblocks_win_x, int nblocks_win_y)
{
cudaSafeCall( hipMemcpyToSymbol(cnbins, &nbins, sizeof(nbins)) );
cudaSafeCall( hipMemcpyToSymbol(cblock_stride_x, &block_stride_x, sizeof(block_stride_x)) );
cudaSafeCall( hipMemcpyToSymbol(cblock_stride_y, &block_stride_y, sizeof(block_stride_y)) );
cudaSafeCall( hipMemcpyToSymbol(cnblocks_win_x, &nblocks_win_x, sizeof(nblocks_win_x)) );
cudaSafeCall( hipMemcpyToSymbol(cnblocks_win_y, &nblocks_win_y, sizeof(nblocks_win_y)) );
int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y;
cudaSafeCall( hipMemcpyToSymbol(cblock_hist_size, &block_hist_size, sizeof(block_hist_size)) );
int block_hist_size_2up = power_2up(block_hist_size);
cudaSafeCall( hipMemcpyToSymbol(cblock_hist_size_2up, &block_hist_size_2up, sizeof(block_hist_size_2up)) );
int descr_width = nblocks_win_x * block_hist_size;
cudaSafeCall( hipMemcpyToSymbol(cdescr_width, &descr_width, sizeof(descr_width)) );
int descr_size = descr_width * nblocks_win_y;
cudaSafeCall( hipMemcpyToSymbol(cdescr_size, &descr_size, sizeof(descr_size)) );
}
//----------------------------------------------------------------------------
// Histogram computation
template <int nblocks> // Number of histogram blocks processed by single GPU thread block
__global__ void compute_hists_kernel_many_blocks(const int img_block_width, const PtrStepf grad,
const PtrStepb qangle, float scale, float* block_hists)
{
const int block_x = threadIdx.z;
const int cell_x = threadIdx.x / 16;
const int cell_y = threadIdx.y;
const int cell_thread_x = threadIdx.x & 0xF;
if (blockIdx.x * blockDim.z + block_x >= img_block_width)
return;
extern __shared__ float smem[];
float* hists = smem;
float* final_hist = smem + cnbins * 48 * nblocks;
const int offset_x = (blockIdx.x * blockDim.z + block_x) * cblock_stride_x +
4 * cell_x + cell_thread_x;
const int offset_y = blockIdx.y * cblock_stride_y + 4 * cell_y;
const float* grad_ptr = grad.ptr(offset_y) + offset_x * 2;
const unsigned char* qangle_ptr = qangle.ptr(offset_y) + offset_x * 2;
// 12 means that 12 pixels affect a block's cell (in one row)
if (cell_thread_x < 12)
{
float* hist = hists + 12 * (cell_y * blockDim.z * CELLS_PER_BLOCK_Y +
cell_x + block_x * CELLS_PER_BLOCK_X) +
cell_thread_x;
for (int bin_id = 0; bin_id < cnbins; ++bin_id)
hist[bin_id * 48 * nblocks] = 0.f;
const int dist_x = -4 + (int)cell_thread_x - 4 * cell_x;
const int dist_y_begin = -4 - 4 * (int)threadIdx.y;
for (int dist_y = dist_y_begin; dist_y < dist_y_begin + 12; ++dist_y)
{
float2 vote = *(const float2*)grad_ptr;
uchar2 bin = *(const uchar2*)qangle_ptr;
grad_ptr += grad.step/sizeof(float);
qangle_ptr += qangle.step;
int dist_center_y = dist_y - 4 * (1 - 2 * cell_y);
int dist_center_x = dist_x - 4 * (1 - 2 * cell_x);
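// Weight each pixel's vote by a Gaussian window centred on the block and by
// bilinear interpolation weights over the block's 2x2 cell grid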
float gaussian = ::expf(-(dist_center_y * dist_center_y +
dist_center_x * dist_center_x) * scale);
float interp_weight = (8.f - ::fabs(dist_y + 0.5f)) *
(8.f - ::fabs(dist_x + 0.5f)) / 64.f;
hist[bin.x * 48 * nblocks] += gaussian * interp_weight * vote.x;
hist[bin.y * 48 * nblocks] += gaussian * interp_weight * vote.y;
}
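// Tree-reduce the 12 per-column partial sums into a single value per cell and bin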
volatile float* hist_ = hist;
for (int bin_id = 0; bin_id < cnbins; ++bin_id, hist_ += 48 * nblocks)
{
if (cell_thread_x < 6) hist_[0] += hist_[6];
if (cell_thread_x < 3) hist_[0] += hist_[3];
if (cell_thread_x == 0)
final_hist[((cell_x + block_x * 2) * 2 + cell_y) * cnbins + bin_id]
= hist_[0] + hist_[1] + hist_[2];
}
}
__syncthreads();
float* block_hist = block_hists + (blockIdx.y * img_block_width +
blockIdx.x * blockDim.z + block_x) *
cblock_hist_size;
int tid = (cell_y * CELLS_PER_BLOCK_Y + cell_x) * 16 + cell_thread_x;
if (tid < cblock_hist_size)
block_hist[tid] = final_hist[block_x * cblock_hist_size + tid];
}
void compute_hists(int nbins, int block_stride_x, int block_stride_y,
int height, int width, const PtrStepSzf& grad,
const PtrStepSzb& qangle, float sigma, float* block_hists)
{
const int nblocks = 1;
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) /
block_stride_x;
int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) /
block_stride_y;
dim3 grid(divUp(img_block_width, nblocks), img_block_height);
dim3 threads(32, 2, nblocks);
cudaSafeCall(hipFuncSetCacheConfig(compute_hists_kernel_many_blocks<nblocks>,
hipFuncCachePreferL1));
// Precompute gaussian spatial window parameter
float scale = 1.f / (2.f * sigma * sigma);
int hists_size = (nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y * 12 * nblocks) * sizeof(float);
int final_hists_size = (nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y * nblocks) * sizeof(float);
int smem = hists_size + final_hists_size;
hipLaunchKernelGGL(( compute_hists_kernel_many_blocks<nblocks>), dim3(grid), dim3(threads), smem, 0,
img_block_width, grad, qangle, scale, block_hists);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
//-------------------------------------------------------------
// Normalization of histograms via L2Hys_norm
//
template<int size>
__device__ float reduce_smem(float* smem, float val)
{
unsigned int tid = threadIdx.x;
float sum = val;
reduce<size>(smem, sum, tid, plus<float>());
if (size == 32)
{
#if __CUDA_ARCH__ >= 300
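// The warp-level reduce leaves the total in lane 0; broadcast it with a shuffle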
return shfl(sum, 0);
#else
return smem[0];
#endif
}
else
{
#if __CUDA_ARCH__ >= 300
if (threadIdx.x == 0)
smem[0] = sum;
#endif
__syncthreads();
return smem[0];
}
}
template <int nthreads, // Number of threads which process one block histogram
int nblocks> // Number of block histograms processed by one GPU thread block
__global__ void normalize_hists_kernel_many_blocks(const int block_hist_size,
const int img_block_width,
float* block_hists, float threshold)
{
if (blockIdx.x * blockDim.z + threadIdx.z >= img_block_width)
return;
float* hist = block_hists + (blockIdx.y * img_block_width +
blockIdx.x * blockDim.z + threadIdx.z) *
block_hist_size + threadIdx.x;
__shared__ float sh_squares[nthreads * nblocks];
float* squares = sh_squares + threadIdx.z * nthreads;
float elem = 0.f;
if (threadIdx.x < block_hist_size)
elem = hist[0];
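// L2-Hys normalization: L2-normalize the block histogram, clip each bin to
// 'threshold', then renormalize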
float sum = reduce_smem<nthreads>(squares, elem * elem);
float scale = 1.0f / (::sqrtf(sum) + 0.1f * block_hist_size);
elem = ::min(elem * scale, threshold);
sum = reduce_smem<nthreads>(squares, elem * elem);
scale = 1.0f / (::sqrtf(sum) + 1e-3f);
if (threadIdx.x < block_hist_size)
hist[0] = elem * scale;
}
void normalize_hists(int nbins, int block_stride_x, int block_stride_y,
int height, int width, float* block_hists, float threshold)
{
const int nblocks = 1;
int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y;
int nthreads = power_2up(block_hist_size);
dim3 threads(nthreads, 1, nblocks);
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) / block_stride_y;
dim3 grid(divUp(img_block_width, nblocks), img_block_height);
if (nthreads == 32)
hipLaunchKernelGGL(( normalize_hists_kernel_many_blocks<32, nblocks>), dim3(grid), dim3(threads), 0, 0, block_hist_size, img_block_width, block_hists, threshold);
else if (nthreads == 64)
hipLaunchKernelGGL(( normalize_hists_kernel_many_blocks<64, nblocks>), dim3(grid), dim3(threads), 0, 0, block_hist_size, img_block_width, block_hists, threshold);
else if (nthreads == 128)
hipLaunchKernelGGL(( normalize_hists_kernel_many_blocks<128, nblocks>), dim3(grid), dim3(threads), 0, 0, block_hist_size, img_block_width, block_hists, threshold);
else if (nthreads == 256)
hipLaunchKernelGGL(( normalize_hists_kernel_many_blocks<256, nblocks>), dim3(grid), dim3(threads), 0, 0, block_hist_size, img_block_width, block_hists, threshold);
else if (nthreads == 512)
hipLaunchKernelGGL(( normalize_hists_kernel_many_blocks<512, nblocks>), dim3(grid), dim3(threads), 0, 0, block_hist_size, img_block_width, block_hists, threshold);
else
CV_Error(cv::Error::StsBadArg, "normalize_hists: histogram's size is too big, try to decrease number of bins");
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
//---------------------------------------------------------------------
// Linear SVM based classification
//
// return confidence values not just positive location
template <int nthreads, // Number of threads per one histogram block
int nblocks> // Number of histogram blocks processed by a single GPU thread block
__global__ void compute_confidence_hists_kernel_many_blocks(const int img_win_width, const int img_block_width,
const int win_block_stride_x, const int win_block_stride_y,
const float* block_hists, const float* coefs,
float free_coef, float threshold, float* confidences)
{
const int win_x = threadIdx.z;
if (blockIdx.x * blockDim.z + win_x >= img_win_width)
return;
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
blockIdx.x * win_block_stride_x * blockDim.z + win_x) *
cblock_hist_size;
float product = 0.f;
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
{
int offset_y = i / cdescr_width;
int offset_x = i - offset_y * cdescr_width;
product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x];
}
__shared__ float products[nthreads * nblocks];
const int tid = threadIdx.z * nthreads + threadIdx.x;
reduce<nthreads>(products, product, tid, plus<float>());
if (threadIdx.x == 0)
confidences[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = product + free_coef;
}
void compute_confidence_hists(int win_height, int win_width, int block_stride_y, int block_stride_x,
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
float* coefs, float free_coef, float threshold, float *confidences)
{
const int nthreads = 256;
const int nblocks = 1;
int win_block_stride_x = win_stride_x / block_stride_x;
int win_block_stride_y = win_stride_y / block_stride_y;
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
dim3 threads(nthreads, 1, nblocks);
dim3 grid(divUp(img_win_width, nblocks), img_win_height);
cudaSafeCall(hipFuncSetCacheConfig(compute_confidence_hists_kernel_many_blocks<nthreads, nblocks>,
hipFuncCachePreferL1));
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) /
block_stride_x;
hipLaunchKernelGGL(( compute_confidence_hists_kernel_many_blocks<nthreads, nblocks>), dim3(grid), dim3(threads), 0, 0,
img_win_width, img_block_width, win_block_stride_x, win_block_stride_y,
block_hists, coefs, free_coef, threshold, confidences);
cudaSafeCall(hipDeviceSynchronize());
}
template <int nthreads, // Number of threads per one histogram block
int nblocks> // Number of histogram blocks processed by a single GPU thread block
__global__ void classify_hists_kernel_many_blocks(const int img_win_width, const int img_block_width,
const int win_block_stride_x, const int win_block_stride_y,
const float* block_hists, const float* coefs,
float free_coef, float threshold, unsigned char* labels)
{
const int win_x = threadIdx.z;
if (blockIdx.x * blockDim.z + win_x >= img_win_width)
return;
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
blockIdx.x * win_block_stride_x * blockDim.z + win_x) *
cblock_hist_size;
float product = 0.f;
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
{
int offset_y = i / cdescr_width;
int offset_x = i - offset_y * cdescr_width;
product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x];
}
__shared__ float products[nthreads * nblocks];
const int tid = threadIdx.z * nthreads + threadIdx.x;
reduce<nthreads>(products, product, tid, plus<float>());
if (threadIdx.x == 0)
labels[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = (product + free_coef >= threshold);
}
void classify_hists(int win_height, int win_width, int block_stride_y, int block_stride_x,
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
float* coefs, float free_coef, float threshold, unsigned char* labels)
{
const int nthreads = 256;
const int nblocks = 1;
int win_block_stride_x = win_stride_x / block_stride_x;
int win_block_stride_y = win_stride_y / block_stride_y;
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
dim3 threads(nthreads, 1, nblocks);
dim3 grid(divUp(img_win_width, nblocks), img_win_height);
cudaSafeCall(hipFuncSetCacheConfig(classify_hists_kernel_many_blocks<nthreads, nblocks>, hipFuncCachePreferL1));
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
hipLaunchKernelGGL(( classify_hists_kernel_many_blocks<nthreads, nblocks>), dim3(grid), dim3(threads), 0, 0,
img_win_width, img_block_width, win_block_stride_x, win_block_stride_y,
block_hists, coefs, free_coef, threshold, labels);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
//----------------------------------------------------------------------------
// Extract descriptors
template <int nthreads>
__global__ void extract_descrs_by_rows_kernel(const int img_block_width, const int win_block_stride_x, const int win_block_stride_y,
const float* block_hists, PtrStepf descriptors)
{
// Get left top corner of the window in src
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
blockIdx.x * win_block_stride_x) * cblock_hist_size;
// Get left top corner of the window in dst
float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x);
// Copy elements from src to dst
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
{
int offset_y = i / cdescr_width;
int offset_x = i - offset_y * cdescr_width;
descriptor[i] = hist[offset_y * img_block_width * cblock_hist_size + offset_x];
}
}
void extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x,
int height, int width, float* block_hists, PtrStepSzf descriptors)
{
const int nthreads = 256;
int win_block_stride_x = win_stride_x / block_stride_x;
int win_block_stride_y = win_stride_y / block_stride_y;
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
dim3 threads(nthreads, 1);
dim3 grid(img_win_width, img_win_height);
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
hipLaunchKernelGGL(( extract_descrs_by_rows_kernel<nthreads>), dim3(grid), dim3(threads), 0, 0,
img_block_width, win_block_stride_x, win_block_stride_y, block_hists, descriptors);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
template <int nthreads>
__global__ void extract_descrs_by_cols_kernel(const int img_block_width, const int win_block_stride_x,
const int win_block_stride_y, const float* block_hists,
PtrStepf descriptors)
{
// Get left top corner of the window in src
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
blockIdx.x * win_block_stride_x) * cblock_hist_size;
// Get left top corner of the window in dst
float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x);
// Copy elements from src to dst
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
{
int block_idx = i / cblock_hist_size;
int idx_in_block = i - block_idx * cblock_hist_size;
int y = block_idx / cnblocks_win_x;
int x = block_idx - y * cnblocks_win_x;
descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block]
= hist[(y * img_block_width + x) * cblock_hist_size + idx_in_block];
}
}
void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, int block_stride_x,
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
PtrStepSzf descriptors)
{
const int nthreads = 256;
int win_block_stride_x = win_stride_x / block_stride_x;
int win_block_stride_y = win_stride_y / block_stride_y;
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
dim3 threads(nthreads, 1);
dim3 grid(img_win_width, img_win_height);
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
hipLaunchKernelGGL(( extract_descrs_by_cols_kernel<nthreads>), dim3(grid), dim3(threads), 0, 0,
img_block_width, win_block_stride_x, win_block_stride_y, block_hists, descriptors);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
//----------------------------------------------------------------------------
// Gradients computation
template <int nthreads, int correct_gamma>
__global__ void compute_gradients_8UC4_kernel(int height, int width, const PtrStepb img,
float angle_scale, PtrStepf grad, PtrStepb qangle)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const uchar4* row = (const uchar4*)img.ptr(blockIdx.y);
__shared__ float sh_row[(nthreads + 2) * 3];
uchar4 val;
if (x < width)
val = row[x];
else
val = row[width - 2];
sh_row[threadIdx.x + 1] = val.x;
sh_row[threadIdx.x + 1 + (nthreads + 2)] = val.y;
sh_row[threadIdx.x + 1 + 2 * (nthreads + 2)] = val.z;
if (threadIdx.x == 0)
{
val = row[::max(x - 1, 1)];
sh_row[0] = val.x;
sh_row[(nthreads + 2)] = val.y;
sh_row[2 * (nthreads + 2)] = val.z;
}
if (threadIdx.x == blockDim.x - 1)
{
val = row[::min(x + 1, width - 2)];
sh_row[blockDim.x + 1] = val.x;
sh_row[blockDim.x + 1 + (nthreads + 2)] = val.y;
sh_row[blockDim.x + 1 + 2 * (nthreads + 2)] = val.z;
}
__syncthreads();
if (x < width)
{
float3 a, b;
b.x = sh_row[threadIdx.x + 2];
b.y = sh_row[threadIdx.x + 2 + (nthreads + 2)];
b.z = sh_row[threadIdx.x + 2 + 2 * (nthreads + 2)];
a.x = sh_row[threadIdx.x];
a.y = sh_row[threadIdx.x + (nthreads + 2)];
a.z = sh_row[threadIdx.x + 2 * (nthreads + 2)];
float3 dx;
if (correct_gamma)
dx = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z));
else
dx = make_float3(b.x - a.x, b.y - a.y, b.z - a.z);
float3 dy = make_float3(0.f, 0.f, 0.f);
if (blockIdx.y > 0 && blockIdx.y < height - 1)
{
val = ((const uchar4*)img.ptr(blockIdx.y - 1))[x];
a = make_float3(val.x, val.y, val.z);
val = ((const uchar4*)img.ptr(blockIdx.y + 1))[x];
b = make_float3(val.x, val.y, val.z);
if (correct_gamma)
dy = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z));
else
dy = make_float3(b.x - a.x, b.y - a.y, b.z - a.z);
}
float best_dx = dx.x;
float best_dy = dy.x;
float mag0 = dx.x * dx.x + dy.x * dy.x;
float mag1 = dx.y * dx.y + dy.y * dy.y;
if (mag0 < mag1)
{
best_dx = dx.y;
best_dy = dy.y;
mag0 = mag1;
}
mag1 = dx.z * dx.z + dy.z * dy.z;
if (mag0 < mag1)
{
best_dx = dx.z;
best_dy = dy.z;
mag0 = mag1;
}
mag0 = ::sqrtf(mag0);
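// Distribute the magnitude between the two nearest orientation bins (linear
// interpolation in angle)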
float ang = (::atan2f(best_dy, best_dx) + CV_PI_F) * angle_scale - 0.5f;
int hidx = (int)::floorf(ang);
ang -= hidx;
hidx = (hidx + cnbins) % cnbins;
((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins);
((float2*)grad.ptr(blockIdx.y))[x] = make_float2(mag0 * (1.f - ang), mag0 * ang);
}
}
void compute_gradients_8UC4(int nbins, int height, int width, const PtrStepSzb& img,
float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma)
{
(void)nbins;
const int nthreads = 256;
dim3 bdim(nthreads, 1);
dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y));
if (correct_gamma)
hipLaunchKernelGGL(( compute_gradients_8UC4_kernel<nthreads, 1>), dim3(gdim), dim3(bdim), 0, 0, height, width, img, angle_scale, grad, qangle);
else
hipLaunchKernelGGL(( compute_gradients_8UC4_kernel<nthreads, 0>), dim3(gdim), dim3(bdim), 0, 0, height, width, img, angle_scale, grad, qangle);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
template <int nthreads, int correct_gamma>
__global__ void compute_gradients_8UC1_kernel(int height, int width, const PtrStepb img,
float angle_scale, PtrStepf grad, PtrStepb qangle)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned char* row = (const unsigned char*)img.ptr(blockIdx.y);
__shared__ float sh_row[nthreads + 2];
if (x < width)
sh_row[threadIdx.x + 1] = row[x];
else
sh_row[threadIdx.x + 1] = row[width - 2];
if (threadIdx.x == 0)
sh_row[0] = row[::max(x - 1, 1)];
if (threadIdx.x == blockDim.x - 1)
sh_row[blockDim.x + 1] = row[::min(x + 1, width - 2)];
__syncthreads();
if (x < width)
{
float dx;
if (correct_gamma)
dx = ::sqrtf(sh_row[threadIdx.x + 2]) - ::sqrtf(sh_row[threadIdx.x]);
else
dx = sh_row[threadIdx.x + 2] - sh_row[threadIdx.x];
float dy = 0.f;
if (blockIdx.y > 0 && blockIdx.y < height - 1)
{
float a = ((const unsigned char*)img.ptr(blockIdx.y + 1))[x];
float b = ((const unsigned char*)img.ptr(blockIdx.y - 1))[x];
if (correct_gamma)
dy = ::sqrtf(a) - ::sqrtf(b);
else
dy = a - b;
}
float mag = ::sqrtf(dx * dx + dy * dy);
float ang = (::atan2f(dy, dx) + CV_PI_F) * angle_scale - 0.5f;
int hidx = (int)::floorf(ang);
ang -= hidx;
hidx = (hidx + cnbins) % cnbins;
((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins);
((float2*) grad.ptr(blockIdx.y))[x] = make_float2(mag * (1.f - ang), mag * ang);
}
}
void compute_gradients_8UC1(int nbins, int height, int width, const PtrStepSzb& img,
float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma)
{
(void)nbins;
const int nthreads = 256;
dim3 bdim(nthreads, 1);
dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y));
if (correct_gamma)
hipLaunchKernelGGL(( compute_gradients_8UC1_kernel<nthreads, 1>), dim3(gdim), dim3(bdim), 0, 0, height, width, img, angle_scale, grad, qangle);
else
hipLaunchKernelGGL(( compute_gradients_8UC1_kernel<nthreads, 0>), dim3(gdim), dim3(bdim), 0, 0, height, width, img, angle_scale, grad, qangle);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
//-------------------------------------------------------------------
// Resize
texture<uchar4, 2, hipReadModeNormalizedFloat> resize8UC4_tex;
texture<uchar, 2, hipReadModeNormalizedFloat> resize8UC1_tex;
__global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar> dst, int colOfs)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
dst.ptr(y)[x] = tex2D(resize8UC1_tex, x * sx + colOfs, y * sy) * 255;
}
__global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar4> dst, int colOfs)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
float4 val = tex2D(resize8UC4_tex, x * sx + colOfs, y * sy);
dst.ptr(y)[x] = make_uchar4(val.x * 255, val.y * 255, val.z * 255, val.w * 255);
}
}
template<class T, class TEX>
static void resize_for_hog(const PtrStepSzb& src, PtrStepSzb dst, TEX& tex)
{
tex.filterMode = hipFilterModeLinear;
size_t texOfs = 0;
int colOfs = 0;
hipChannelFormatDesc desc = hipCreateChannelDesc<T>();
cudaSafeCall( hipBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) );
if (texOfs != 0)
{
colOfs = static_cast<int>( texOfs/sizeof(T) );
cudaSafeCall( hipUnbindTexture(tex) );
cudaSafeCall( hipBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) );
}
dim3 threads(32, 8);
dim3 grid(divUp(dst.cols, threads.x), divUp(dst.rows, threads.y));
float sx = static_cast<float>(src.cols) / dst.cols;
float sy = static_cast<float>(src.rows) / dst.rows;
hipLaunchKernelGGL(( resize_for_hog_kernel), dim3(grid), dim3(threads), 0, 0, sx, sy, (PtrStepSz<T>)dst, colOfs);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
cudaSafeCall( hipUnbindTexture(tex) );
}
void resize_8UC1(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar> (src, dst, resize8UC1_tex); }
void resize_8UC4(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar4>(src, dst, resize8UC4_tex); }
} // namespace hog
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
|
778f42c699fe3692dd8efaab0f4fc410efc7eb53.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/warp_shuffle.hpp"
namespace cv { namespace gpu { namespace cudev
{
// Other values are not supported
#define CELL_WIDTH 8
#define CELL_HEIGHT 8
#define CELLS_PER_BLOCK_X 2
#define CELLS_PER_BLOCK_Y 2
namespace hog
{
__constant__ int cnbins;
__constant__ int cblock_stride_x;
__constant__ int cblock_stride_y;
__constant__ int cnblocks_win_x;
__constant__ int cnblocks_win_y;
__constant__ int cblock_hist_size;
__constant__ int cblock_hist_size_2up;
__constant__ int cdescr_size;
__constant__ int cdescr_width;
/* Returns the nearest upper power of two, works only for
the typical GPU thread count (per block) values */
int power_2up(unsigned int n)
{
if (n < 1) return 1;
else if (n < 2) return 2;
else if (n < 4) return 4;
else if (n < 8) return 8;
else if (n < 16) return 16;
else if (n < 32) return 32;
else if (n < 64) return 64;
else if (n < 128) return 128;
else if (n < 256) return 256;
else if (n < 512) return 512;
else if (n < 1024) return 1024;
return -1; // Input is too big
}
void set_up_constants(int nbins, int block_stride_x, int block_stride_y,
int nblocks_win_x, int nblocks_win_y)
{
cudaSafeCall( cudaMemcpyToSymbol(cnbins, &nbins, sizeof(nbins)) );
cudaSafeCall( cudaMemcpyToSymbol(cblock_stride_x, &block_stride_x, sizeof(block_stride_x)) );
cudaSafeCall( cudaMemcpyToSymbol(cblock_stride_y, &block_stride_y, sizeof(block_stride_y)) );
cudaSafeCall( cudaMemcpyToSymbol(cnblocks_win_x, &nblocks_win_x, sizeof(nblocks_win_x)) );
cudaSafeCall( cudaMemcpyToSymbol(cnblocks_win_y, &nblocks_win_y, sizeof(nblocks_win_y)) );
int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y;
cudaSafeCall( cudaMemcpyToSymbol(cblock_hist_size, &block_hist_size, sizeof(block_hist_size)) );
int block_hist_size_2up = power_2up(block_hist_size);
cudaSafeCall( cudaMemcpyToSymbol(cblock_hist_size_2up, &block_hist_size_2up, sizeof(block_hist_size_2up)) );
int descr_width = nblocks_win_x * block_hist_size;
cudaSafeCall( cudaMemcpyToSymbol(cdescr_width, &descr_width, sizeof(descr_width)) );
int descr_size = descr_width * nblocks_win_y;
cudaSafeCall( cudaMemcpyToSymbol(cdescr_size, &descr_size, sizeof(descr_size)) );
}
//----------------------------------------------------------------------------
// Histogram computation
template <int nblocks> // Number of histogram blocks processed by a single GPU thread block
__global__ void compute_hists_kernel_many_blocks(const int img_block_width, const PtrStepf grad,
const PtrStepb qangle, float scale, float* block_hists)
{
const int block_x = threadIdx.z;
const int cell_x = threadIdx.x / 16;
const int cell_y = threadIdx.y;
const int cell_thread_x = threadIdx.x & 0xF;
if (blockIdx.x * blockDim.z + block_x >= img_block_width)
return;
extern __shared__ float smem[];
float* hists = smem;
float* final_hist = smem + cnbins * 48 * nblocks;
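// Dynamic shared memory: cnbins * 48 * nblocks partial-sum slots
// (12 pixel columns x 4 cells per histogram block), followed by the final block histograms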
const int offset_x = (blockIdx.x * blockDim.z + block_x) * cblock_stride_x +
4 * cell_x + cell_thread_x;
const int offset_y = blockIdx.y * cblock_stride_y + 4 * cell_y;
const float* grad_ptr = grad.ptr(offset_y) + offset_x * 2;
const unsigned char* qangle_ptr = qangle.ptr(offset_y) + offset_x * 2;
// 12 means that 12 pixels affect a block's cell (in one row)
if (cell_thread_x < 12)
{
float* hist = hists + 12 * (cell_y * blockDim.z * CELLS_PER_BLOCK_Y +
cell_x + block_x * CELLS_PER_BLOCK_X) +
cell_thread_x;
for (int bin_id = 0; bin_id < cnbins; ++bin_id)
hist[bin_id * 48 * nblocks] = 0.f;
const int dist_x = -4 + (int)cell_thread_x - 4 * cell_x;
const int dist_y_begin = -4 - 4 * (int)threadIdx.y;
for (int dist_y = dist_y_begin; dist_y < dist_y_begin + 12; ++dist_y)
{
float2 vote = *(const float2*)grad_ptr;
uchar2 bin = *(const uchar2*)qangle_ptr;
grad_ptr += grad.step/sizeof(float);
qangle_ptr += qangle.step;
int dist_center_y = dist_y - 4 * (1 - 2 * cell_y);
int dist_center_x = dist_x - 4 * (1 - 2 * cell_x);
float gaussian = ::expf(-(dist_center_y * dist_center_y +
dist_center_x * dist_center_x) * scale);
float interp_weight = (8.f - ::fabs(dist_y + 0.5f)) *
(8.f - ::fabs(dist_x + 0.5f)) / 64.f;
hist[bin.x * 48 * nblocks] += gaussian * interp_weight * vote.x;
hist[bin.y * 48 * nblocks] += gaussian * interp_weight * vote.y;
}
volatile float* hist_ = hist;
for (int bin_id = 0; bin_id < cnbins; ++bin_id, hist_ += 48 * nblocks)
{
if (cell_thread_x < 6) hist_[0] += hist_[6];
if (cell_thread_x < 3) hist_[0] += hist_[3];
if (cell_thread_x == 0)
final_hist[((cell_x + block_x * 2) * 2 + cell_y) * cnbins + bin_id]
= hist_[0] + hist_[1] + hist_[2];
}
}
__syncthreads();
float* block_hist = block_hists + (blockIdx.y * img_block_width +
blockIdx.x * blockDim.z + block_x) *
cblock_hist_size;
int tid = (cell_y * CELLS_PER_BLOCK_Y + cell_x) * 16 + cell_thread_x;
if (tid < cblock_hist_size)
block_hist[tid] = final_hist[block_x * cblock_hist_size + tid];
}
void compute_hists(int nbins, int block_stride_x, int block_stride_y,
int height, int width, const PtrStepSzf& grad,
const PtrStepSzb& qangle, float sigma, float* block_hists)
{
const int nblocks = 1;
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) /
block_stride_x;
int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) /
block_stride_y;
dim3 grid(divUp(img_block_width, nblocks), img_block_height);
dim3 threads(32, 2, nblocks);
cudaSafeCall(cudaFuncSetCacheConfig(compute_hists_kernel_many_blocks<nblocks>,
cudaFuncCachePreferL1));
// Precompute gaussian spatial window parameter
float scale = 1.f / (2.f * sigma * sigma);
int hists_size = (nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y * 12 * nblocks) * sizeof(float);
int final_hists_size = (nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y * nblocks) * sizeof(float);
int smem = hists_size + final_hists_size;
compute_hists_kernel_many_blocks<nblocks><<<grid, threads, smem>>>(
img_block_width, grad, qangle, scale, block_hists);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
//-------------------------------------------------------------
// Normalization of histograms via L2Hys_norm
//
template<int size>
__device__ float reduce_smem(float* smem, float val)
{
unsigned int tid = threadIdx.x;
float sum = val;
reduce<size>(smem, sum, tid, plus<float>());
if (size == 32)
{
#if __CUDA_ARCH__ >= 300
return shfl(sum, 0);
#else
return smem[0];
#endif
}
else
{
#if __CUDA_ARCH__ >= 300
if (threadIdx.x == 0)
smem[0] = sum;
#endif
__syncthreads();
return smem[0];
}
}
template <int nthreads, // Number of threads which process one block histogram
int nblocks> // Number of block histograms processed by one GPU thread block
__global__ void normalize_hists_kernel_many_blocks(const int block_hist_size,
const int img_block_width,
float* block_hists, float threshold)
{
if (blockIdx.x * blockDim.z + threadIdx.z >= img_block_width)
return;
float* hist = block_hists + (blockIdx.y * img_block_width +
blockIdx.x * blockDim.z + threadIdx.z) *
block_hist_size + threadIdx.x;
__shared__ float sh_squares[nthreads * nblocks];
float* squares = sh_squares + threadIdx.z * nthreads;
float elem = 0.f;
if (threadIdx.x < block_hist_size)
elem = hist[0];
float sum = reduce_smem<nthreads>(squares, elem * elem);
float scale = 1.0f / (::sqrtf(sum) + 0.1f * block_hist_size);
elem = ::min(elem * scale, threshold);
sum = reduce_smem<nthreads>(squares, elem * elem);
scale = 1.0f / (::sqrtf(sum) + 1e-3f);
if (threadIdx.x < block_hist_size)
hist[0] = elem * scale;
}
void normalize_hists(int nbins, int block_stride_x, int block_stride_y,
int height, int width, float* block_hists, float threshold)
{
const int nblocks = 1;
int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y;
int nthreads = power_2up(block_hist_size);
dim3 threads(nthreads, 1, nblocks);
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) / block_stride_y;
dim3 grid(divUp(img_block_width, nblocks), img_block_height);
if (nthreads == 32)
normalize_hists_kernel_many_blocks<32, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
else if (nthreads == 64)
normalize_hists_kernel_many_blocks<64, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
else if (nthreads == 128)
normalize_hists_kernel_many_blocks<128, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
else if (nthreads == 256)
normalize_hists_kernel_many_blocks<256, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
else if (nthreads == 512)
normalize_hists_kernel_many_blocks<512, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
else
CV_Error(cv::Error::StsBadArg, "normalize_hists: histogram's size is too big, try to decrease number of bins");
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
//---------------------------------------------------------------------
// Linear SVM based classification
//
// return confidence values not just positive location
template <int nthreads, // Number of threads per one histogram block
int nblocks> // Number of histogram blocks processed by a single GPU thread block
__global__ void compute_confidence_hists_kernel_many_blocks(const int img_win_width, const int img_block_width,
const int win_block_stride_x, const int win_block_stride_y,
const float* block_hists, const float* coefs,
float free_coef, float threshold, float* confidences)
{
const int win_x = threadIdx.z;
if (blockIdx.x * blockDim.z + win_x >= img_win_width)
return;
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
blockIdx.x * win_block_stride_x * blockDim.z + win_x) *
cblock_hist_size;
float product = 0.f;
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
{
int offset_y = i / cdescr_width;
int offset_x = i - offset_y * cdescr_width;
product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x];
}
__shared__ float products[nthreads * nblocks];
const int tid = threadIdx.z * nthreads + threadIdx.x;
reduce<nthreads>(products, product, tid, plus<float>());
if (threadIdx.x == 0)
confidences[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = product + free_coef;
}
void compute_confidence_hists(int win_height, int win_width, int block_stride_y, int block_stride_x,
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
float* coefs, float free_coef, float threshold, float *confidences)
{
const int nthreads = 256;
const int nblocks = 1;
int win_block_stride_x = win_stride_x / block_stride_x;
int win_block_stride_y = win_stride_y / block_stride_y;
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
dim3 threads(nthreads, 1, nblocks);
dim3 grid(divUp(img_win_width, nblocks), img_win_height);
cudaSafeCall(cudaFuncSetCacheConfig(compute_confidence_hists_kernel_many_blocks<nthreads, nblocks>,
cudaFuncCachePreferL1));
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) /
block_stride_x;
compute_confidence_hists_kernel_many_blocks<nthreads, nblocks><<<grid, threads>>>(
img_win_width, img_block_width, win_block_stride_x, win_block_stride_y,
block_hists, coefs, free_coef, threshold, confidences);
cudaSafeCall(cudaThreadSynchronize());
}
template <int nthreads, // Number of threads per one histogram block
int nblocks> // Number of histogram blocks processed by a single GPU thread block
__global__ void classify_hists_kernel_many_blocks(const int img_win_width, const int img_block_width,
const int win_block_stride_x, const int win_block_stride_y,
const float* block_hists, const float* coefs,
float free_coef, float threshold, unsigned char* labels)
{
const int win_x = threadIdx.z;
if (blockIdx.x * blockDim.z + win_x >= img_win_width)
return;
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
blockIdx.x * win_block_stride_x * blockDim.z + win_x) *
cblock_hist_size;
float product = 0.f;
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
{
int offset_y = i / cdescr_width;
int offset_x = i - offset_y * cdescr_width;
product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x];
}
__shared__ float products[nthreads * nblocks];
const int tid = threadIdx.z * nthreads + threadIdx.x;
reduce<nthreads>(products, product, tid, plus<float>());
if (threadIdx.x == 0)
labels[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = (product + free_coef >= threshold);
}
void classify_hists(int win_height, int win_width, int block_stride_y, int block_stride_x,
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
float* coefs, float free_coef, float threshold, unsigned char* labels)
{
const int nthreads = 256;
const int nblocks = 1;
int win_block_stride_x = win_stride_x / block_stride_x;
int win_block_stride_y = win_stride_y / block_stride_y;
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
dim3 threads(nthreads, 1, nblocks);
dim3 grid(divUp(img_win_width, nblocks), img_win_height);
cudaSafeCall(cudaFuncSetCacheConfig(classify_hists_kernel_many_blocks<nthreads, nblocks>, cudaFuncCachePreferL1));
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
classify_hists_kernel_many_blocks<nthreads, nblocks><<<grid, threads>>>(
img_win_width, img_block_width, win_block_stride_x, win_block_stride_y,
block_hists, coefs, free_coef, threshold, labels);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
//----------------------------------------------------------------------------
// Extract descriptors
template <int nthreads>
__global__ void extract_descrs_by_rows_kernel(const int img_block_width, const int win_block_stride_x, const int win_block_stride_y,
const float* block_hists, PtrStepf descriptors)
{
// Get left top corner of the window in src
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
blockIdx.x * win_block_stride_x) * cblock_hist_size;
// Get left top corner of the window in dst
float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x);
// Copy elements from src to dst
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
{
int offset_y = i / cdescr_width;
int offset_x = i - offset_y * cdescr_width;
descriptor[i] = hist[offset_y * img_block_width * cblock_hist_size + offset_x];
}
}
void extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x,
int height, int width, float* block_hists, PtrStepSzf descriptors)
{
const int nthreads = 256;
int win_block_stride_x = win_stride_x / block_stride_x;
int win_block_stride_y = win_stride_y / block_stride_y;
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
dim3 threads(nthreads, 1);
dim3 grid(img_win_width, img_win_height);
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
extract_descrs_by_rows_kernel<nthreads><<<grid, threads>>>(
img_block_width, win_block_stride_x, win_block_stride_y, block_hists, descriptors);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int nthreads>
__global__ void extract_descrs_by_cols_kernel(const int img_block_width, const int win_block_stride_x,
const int win_block_stride_y, const float* block_hists,
PtrStepf descriptors)
{
// Get left top corner of the window in src
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
blockIdx.x * win_block_stride_x) * cblock_hist_size;
// Get left top corner of the window in dst
float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x);
// Copy elements from src to dst
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
{
int block_idx = i / cblock_hist_size;
int idx_in_block = i - block_idx * cblock_hist_size;
int y = block_idx / cnblocks_win_x;
int x = block_idx - y * cnblocks_win_x;
descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block]
= hist[(y * img_block_width + x) * cblock_hist_size + idx_in_block];
}
}
void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, int block_stride_x,
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
PtrStepSzf descriptors)
{
const int nthreads = 256;
int win_block_stride_x = win_stride_x / block_stride_x;
int win_block_stride_y = win_stride_y / block_stride_y;
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
dim3 threads(nthreads, 1);
dim3 grid(img_win_width, img_win_height);
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
extract_descrs_by_cols_kernel<nthreads><<<grid, threads>>>(
img_block_width, win_block_stride_x, win_block_stride_y, block_hists, descriptors);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
//----------------------------------------------------------------------------
// Gradients computation
template <int nthreads, int correct_gamma>
__global__ void compute_gradients_8UC4_kernel(int height, int width, const PtrStepb img,
float angle_scale, PtrStepf grad, PtrStepb qangle)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const uchar4* row = (const uchar4*)img.ptr(blockIdx.y);
__shared__ float sh_row[(nthreads + 2) * 3];
uchar4 val;
if (x < width)
val = row[x];
else
val = row[width - 2];
sh_row[threadIdx.x + 1] = val.x;
sh_row[threadIdx.x + 1 + (nthreads + 2)] = val.y;
sh_row[threadIdx.x + 1 + 2 * (nthreads + 2)] = val.z;
if (threadIdx.x == 0)
{
val = row[::max(x - 1, 1)];
sh_row[0] = val.x;
sh_row[(nthreads + 2)] = val.y;
sh_row[2 * (nthreads + 2)] = val.z;
}
if (threadIdx.x == blockDim.x - 1)
{
val = row[::min(x + 1, width - 2)];
sh_row[blockDim.x + 1] = val.x;
sh_row[blockDim.x + 1 + (nthreads + 2)] = val.y;
sh_row[blockDim.x + 1 + 2 * (nthreads + 2)] = val.z;
}
__syncthreads();
if (x < width)
{
float3 a, b;
b.x = sh_row[threadIdx.x + 2];
b.y = sh_row[threadIdx.x + 2 + (nthreads + 2)];
b.z = sh_row[threadIdx.x + 2 + 2 * (nthreads + 2)];
a.x = sh_row[threadIdx.x];
a.y = sh_row[threadIdx.x + (nthreads + 2)];
a.z = sh_row[threadIdx.x + 2 * (nthreads + 2)];
float3 dx;
if (correct_gamma)
dx = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z));
else
dx = make_float3(b.x - a.x, b.y - a.y, b.z - a.z);
float3 dy = make_float3(0.f, 0.f, 0.f);
if (blockIdx.y > 0 && blockIdx.y < height - 1)
{
val = ((const uchar4*)img.ptr(blockIdx.y - 1))[x];
a = make_float3(val.x, val.y, val.z);
val = ((const uchar4*)img.ptr(blockIdx.y + 1))[x];
b = make_float3(val.x, val.y, val.z);
if (correct_gamma)
dy = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z));
else
dy = make_float3(b.x - a.x, b.y - a.y, b.z - a.z);
}
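// Keep the gradient of the color channel with the largest magnitude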
float best_dx = dx.x;
float best_dy = dy.x;
float mag0 = dx.x * dx.x + dy.x * dy.x;
float mag1 = dx.y * dx.y + dy.y * dy.y;
if (mag0 < mag1)
{
best_dx = dx.y;
best_dy = dy.y;
mag0 = mag1;
}
mag1 = dx.z * dx.z + dy.z * dy.z;
if (mag0 < mag1)
{
best_dx = dx.z;
best_dy = dy.z;
mag0 = mag1;
}
mag0 = ::sqrtf(mag0);
float ang = (::atan2f(best_dy, best_dx) + CV_PI_F) * angle_scale - 0.5f;
int hidx = (int)::floorf(ang);
ang -= hidx;
hidx = (hidx + cnbins) % cnbins;
((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins);
((float2*)grad.ptr(blockIdx.y))[x] = make_float2(mag0 * (1.f - ang), mag0 * ang);
}
}
void compute_gradients_8UC4(int nbins, int height, int width, const PtrStepSzb& img,
float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma)
{
(void)nbins;
const int nthreads = 256;
dim3 bdim(nthreads, 1);
dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y));
if (correct_gamma)
compute_gradients_8UC4_kernel<nthreads, 1><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);
else
compute_gradients_8UC4_kernel<nthreads, 0><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int nthreads, int correct_gamma>
__global__ void compute_gradients_8UC1_kernel(int height, int width, const PtrStepb img,
float angle_scale, PtrStepf grad, PtrStepb qangle)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned char* row = (const unsigned char*)img.ptr(blockIdx.y);
__shared__ float sh_row[nthreads + 2];
if (x < width)
sh_row[threadIdx.x + 1] = row[x];
else
sh_row[threadIdx.x + 1] = row[width - 2];
if (threadIdx.x == 0)
sh_row[0] = row[::max(x - 1, 1)];
if (threadIdx.x == blockDim.x - 1)
sh_row[blockDim.x + 1] = row[::min(x + 1, width - 2)];
__syncthreads();
if (x < width)
{
float dx;
if (correct_gamma)
dx = ::sqrtf(sh_row[threadIdx.x + 2]) - ::sqrtf(sh_row[threadIdx.x]);
else
dx = sh_row[threadIdx.x + 2] - sh_row[threadIdx.x];
float dy = 0.f;
if (blockIdx.y > 0 && blockIdx.y < height - 1)
{
float a = ((const unsigned char*)img.ptr(blockIdx.y + 1))[x];
float b = ((const unsigned char*)img.ptr(blockIdx.y - 1))[x];
if (correct_gamma)
dy = ::sqrtf(a) - ::sqrtf(b);
else
dy = a - b;
}
float mag = ::sqrtf(dx * dx + dy * dy);
float ang = (::atan2f(dy, dx) + CV_PI_F) * angle_scale - 0.5f;
int hidx = (int)::floorf(ang);
ang -= hidx;
hidx = (hidx + cnbins) % cnbins;
((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins);
((float2*) grad.ptr(blockIdx.y))[x] = make_float2(mag * (1.f - ang), mag * ang);
}
}
void compute_gradients_8UC1(int nbins, int height, int width, const PtrStepSzb& img,
float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma)
{
(void)nbins;
const int nthreads = 256;
dim3 bdim(nthreads, 1);
dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y));
if (correct_gamma)
compute_gradients_8UC1_kernel<nthreads, 1><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);
else
compute_gradients_8UC1_kernel<nthreads, 0><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
//-------------------------------------------------------------------
// Resize
texture<uchar4, 2, cudaReadModeNormalizedFloat> resize8UC4_tex;
texture<uchar, 2, cudaReadModeNormalizedFloat> resize8UC1_tex;
__global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar> dst, int colOfs)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
dst.ptr(y)[x] = tex2D(resize8UC1_tex, x * sx + colOfs, y * sy) * 255;
}
__global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar4> dst, int colOfs)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
float4 val = tex2D(resize8UC4_tex, x * sx + colOfs, y * sy);
dst.ptr(y)[x] = make_uchar4(val.x * 255, val.y * 255, val.z * 255, val.w * 255);
}
}
template<class T, class TEX>
static void resize_for_hog(const PtrStepSzb& src, PtrStepSzb dst, TEX& tex)
{
tex.filterMode = cudaFilterModeLinear;
size_t texOfs = 0;
int colOfs = 0;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
cudaSafeCall( cudaBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) );
if (texOfs != 0)
{
colOfs = static_cast<int>( texOfs/sizeof(T) );
cudaSafeCall( cudaUnbindTexture(tex) );
cudaSafeCall( cudaBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) );
}
dim3 threads(32, 8);
dim3 grid(divUp(dst.cols, threads.x), divUp(dst.rows, threads.y));
float sx = static_cast<float>(src.cols) / dst.cols;
float sy = static_cast<float>(src.rows) / dst.rows;
resize_for_hog_kernel<<<grid, threads>>>(sx, sy, (PtrStepSz<T>)dst, colOfs);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
cudaSafeCall( cudaUnbindTexture(tex) );
}
void resize_8UC1(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar> (src, dst, resize8UC1_tex); }
void resize_8UC4(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar4>(src, dst, resize8UC4_tex); }
} // namespace hog
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
|
b4c8d48da3c3cbd64c97f0ef2bc7b3bc35306363.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "helper_cuda.h"
void merge(int *array, int start, int mid, int end)
{
int left_index, right_index, global_index;
int left_len = mid - start + 1;
int right_len = end - mid;
int left[left_len];
int right[right_len];
// initialize left array
for (int i = 0; i < left_len; i++) {
left[i] = array[start + i];
}
// initialize right array
for (int i = 0; i < right_len; i++) {
right[i] = array[mid + 1 + i];
}
// index of left array
left_index = 0;
// index of right array
right_index = 0;
// index of merged array
global_index = start;
while (left_index < left_len && right_index < right_len) {
if (left[left_index] <= right[right_index]) {
array[global_index++] = left[left_index++];
} else {
array[global_index++] = right[right_index++];
}
}
// copy the rest of left array
while (left_index < left_len) {
array[global_index++] = left[left_index++];
}
// copy the rest of right array
while (right_index < right_len) {
array[global_index++] = right[right_index++];
}
}
void cpu_merge_sort(int *array, int start, int end)
{
if (start >= end)
return;
int mid = start + (end - start) / 2;
cpu_merge_sort(array, start, mid);
cpu_merge_sort(array, mid + 1, end);
merge(array, start, mid, end);
}
__global__ void gpu_merge(int *d_in, int *d_out, int size, int sorted_size)
{
// global ID
int gid = blockIdx.x * blockDim.x + threadIdx.x;
// start, end of left subarray
int left_start = gid * 2 * sorted_size;
int left_end = min((gid * 2 + 1) * sorted_size - 1, size - 1);
// start, end of right subarray
int right_start = (gid * 2 + 1) * sorted_size;
int right_end = min((gid * 2 + 2) * sorted_size - 1, size - 1);
int left_index = left_start, right_index= right_start, global_index= left_start;
while (left_index <= left_end && right_index <= right_end) {
if (d_in[left_index] <= d_in[right_index]) {
d_out[global_index++] = d_in[left_index++];
} else {
d_out[global_index++] = d_in[right_index++];
}
}
while (left_index <= left_end) {
d_out[global_index++] = d_in[left_index++];
}
while (right_index <= right_end) {
d_out[global_index++] = d_in[right_index++];
}
}
void gpu_merge_sort(int *h_in, int size)
{
int *d_in, *d_out, *tmp;
if (size == 1)
return;
// allocate GPU memory
checkCudaErrors(hipMalloc((void**)&d_in, size * sizeof(int)));
checkCudaErrors(hipMalloc((void**)&d_out, size * sizeof(int)));
// copy input from host memory to GPU memory
hipMemcpy(d_in, h_in, size * sizeof(int), hipMemcpyHostToDevice);
// # of threads per block
int threads_per_block = 1024;
int sorted_size = 1;
// # of blocks
int blocks = 1;
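// Bottom-up merge sort: each pass merges pairs of adjacent sorted runs,
// doubling sorted_size until it covers the whole array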
while (sorted_size < size) {
// each thread can merge at most 2 * sorted_size elements
// how many threads do we need in total?
int threads_total = (size + 2 * sorted_size - 1) / (2 * sorted_size);
// total # of blocks that we need
blocks = (threads_total + threads_per_block - 1) / threads_per_block;
hipLaunchKernelGGL(( gpu_merge), dim3(blocks), dim3(threads_per_block), 0, 0, d_in, d_out, size, sorted_size);
sorted_size *= 2;
// exchange input and output
tmp = d_in;
d_in = d_out;
d_out = tmp;
}
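// after the final swap the fully sorted data is in d_in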
// copy output from GPU memory to host memory
hipMemcpy(h_in, d_in, size * sizeof(int), hipMemcpyDeviceToHost);
// free GPU memory
hipFree(d_in);
hipFree(d_out);
}
int main()
{
int array_size = 111111;
int array[array_size];
// sort result computed by GPU
int h_in[array_size];
bool result;
// initialize random number generator
srand(time(NULL));
printf("Input\n");
for (int i = 0; i < array_size; i++) {
array[i] = rand() % array_size;
h_in[i] = array[i];
printf("%d ", array[i]);
}
printf("\n");
// merge sort on CPU
cpu_merge_sort(array, 0, array_size - 1);
printf("Expected Output\n");
for (int i = 0; i < array_size; i++) {
printf("%d ", array[i]);
}
printf("\n");
// merge sort on GPU
gpu_merge_sort(h_in, array_size);
printf("GPU Output\n");
result = true;
for (int i = 0; i < array_size; i++) {
printf("%d ", h_in[i]);
if (h_in[i] != array[i]) {
result = false;
}
}
printf("\n");
if (result) {
printf("Correct\n");
} else {
printf("Wrong\n");
}
return 0;
}
|
b4c8d48da3c3cbd64c97f0ef2bc7b3bc35306363.cu
|
#include <stdio.h>
#include "helper_cuda.h"
void merge(int *array, int start, int mid, int end)
{
int left_index, right_index, global_index;
int left_len = mid - start + 1;
int right_len = end - mid;
int left[left_len];
int right[right_len];
// initialize left array
for (int i = 0; i < left_len; i++) {
left[i] = array[start + i];
}
// initialize right array
for (int i = 0; i < right_len; i++) {
right[i] = array[mid + 1 + i];
}
// index of left array
left_index = 0;
// index of right array
right_index = 0;
// index of merged array
global_index = start;
while (left_index < left_len && right_index < right_len) {
if (left[left_index] <= right[right_index]) {
array[global_index++] = left[left_index++];
} else {
array[global_index++] = right[right_index++];
}
}
// copy the rest of left array
while (left_index < left_len) {
array[global_index++] = left[left_index++];
}
// copy the rest of right array
while (right_index < right_len) {
array[global_index++] = right[right_index++];
}
}
void cpu_merge_sort(int *array, int start, int end)
{
if (start >= end)
return;
int mid = start + (end - start) / 2;
cpu_merge_sort(array, start, mid);
cpu_merge_sort(array, mid + 1, end);
merge(array, start, mid, end);
}
__global__ void gpu_merge(int *d_in, int *d_out, int size, int sorted_size)
{
// global ID
int gid = blockIdx.x * blockDim.x + threadIdx.x;
// start, end of left subarray
int left_start = gid * 2 * sorted_size;
int left_end = min((gid * 2 + 1) * sorted_size - 1, size - 1);
// start, end of right subarray
int right_start = (gid * 2 + 1) * sorted_size;
int right_end = min((gid * 2 + 2) * sorted_size - 1, size - 1);
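// merge the two adjacent sorted runs of length sorted_size from d_in into d_out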
int left_index = left_start, right_index= right_start, global_index= left_start;
while (left_index <= left_end && right_index <= right_end) {
if (d_in[left_index] <= d_in[right_index]) {
d_out[global_index++] = d_in[left_index++];
} else {
d_out[global_index++] = d_in[right_index++];
}
}
while (left_index <= left_end) {
d_out[global_index++] = d_in[left_index++];
}
while (right_index <= right_end) {
d_out[global_index++] = d_in[right_index++];
}
}
void gpu_merge_sort(int *h_in, int size)
{
int *d_in, *d_out, *tmp;
if (size == 1)
return;
// allocate GPU memory
checkCudaErrors(cudaMalloc((void**)&d_in, size * sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&d_out, size * sizeof(int)));
// copy input from host memory to GPU memory
cudaMemcpy(d_in, h_in, size * sizeof(int), cudaMemcpyHostToDevice);
// # of threads per block
int threads_per_block = 1024;
int sorted_size = 1;
// # of blocks
int blocks = 1;
while (sorted_size < size) {
// each thread can merge at most 2 * sorted_size elements
// how many threads do we need in total?
int threads_total = (size + 2 * sorted_size - 1) / (2 * sorted_size);
// total # of blocks that we need
blocks = (threads_total + threads_per_block - 1) / threads_per_block;
gpu_merge<<<blocks, threads_per_block>>>(d_in, d_out, size, sorted_size);
sorted_size *= 2;
// exchange input and output
tmp = d_in;
d_in = d_out;
d_out = tmp;
}
// copy output from GPU memory to host memory
cudaMemcpy(h_in, d_in, size * sizeof(int), cudaMemcpyDeviceToHost);
// free GPU memory
cudaFree(d_in);
cudaFree(d_out);
}
int main()
{
int array_size = 111111;
int array[array_size];
// sort result computed by GPU
int h_in[array_size];
bool result;
// initialize random number generator
srand(time(NULL));
printf("Input\n");
for (int i = 0; i < array_size; i++) {
array[i] = rand() % array_size;
h_in[i] = array[i];
printf("%d ", array[i]);
}
printf("\n");
// merge sort on CPU
cpu_merge_sort(array, 0, array_size - 1);
printf("Expected Output\n");
for (int i = 0; i < array_size; i++) {
printf("%d ", array[i]);
}
printf("\n");
// merge sort on GPU
gpu_merge_sort(h_in, array_size);
printf("GPU Output\n");
result = true;
for (int i = 0; i < array_size; i++) {
printf("%d ", h_in[i]);
if (h_in[i] != array[i]) {
result = false;
}
}
printf("\n");
if (result) {
printf("Correct\n");
} else {
printf("Wrong\n");
}
return 0;
}
|
e999c1073fb9f24b661aee10b4c1d08f69ba1fcb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs.
*
* There's one CUDA context per thread. To use multiple CUDA contexts you
* have to create multiple threads. One for each GPU. For optimal performance,
* the number of CPU cores should be equal to the number of GPUs in the system.
*
* Creating CPU threads has a certain overhead. So, this is only worth when you
* have a significant amount of work to do per thread. It's also recommended to
* create a pool of threads and reuse them to avoid this overhead.
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
* application. On the other side, you can still extend your desktop to screens
* attached to both GPUs.
*/
////////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA SDK sample describing
// reduction optimization strategies
////////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernel(float *d_Result, float *d_Input, int N){
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for(int pos = tid; pos < N; pos += threadN)
sum += d_Input[pos];
d_Result[tid] = sum;
}
extern "C"
void launch_reduceKernel(float *d_Result, float *d_Input, int N, int BLOCK_N, int THREAD_N)
{
hipLaunchKernelGGL(( reduceKernel), dim3(BLOCK_N), dim3(THREAD_N), 0, 0, d_Result, d_Input, N);
}
|
e999c1073fb9f24b661aee10b4c1d08f69ba1fcb.cu
|
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs.
*
* There's one CUDA context per thread. To use multiple CUDA contexts you
* have to create multiple threads. One for each GPU. For optimal performance,
* the number of CPU cores should be equal to the number of GPUs in the system.
*
* Creating CPU threads has a certain overhead. So, this is only worth when you
* have a significant amount of work to do per thread. It's also recommended to
* create a pool of threads and reuse them to avoid this overhead.
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
* application. On the other side, you can still extend your desktop to screens
* attached to both GPUs.
*/
////////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA SDK sample describing
// reduction optimization strategies
////////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernel(float *d_Result, float *d_Input, int N){
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
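    // grid-stride loop: thread tid accumulates elements tid, tid+threadN, tid+2*threadN, ...
    // so d_Result ends up holding gridDim.x*blockDim.x partial sums that still need a final reduction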
float sum = 0;
for(int pos = tid; pos < N; pos += threadN)
sum += d_Input[pos];
d_Result[tid] = sum;
}
extern "C"
void launch_reduceKernel(float *d_Result, float *d_Input, int N, int BLOCK_N, int THREAD_N)
{
reduceKernel<<<BLOCK_N, THREAD_N>>>(d_Result, d_Input, N);
}
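// --- Minimal usage sketch (added for clarity; not part of the original sample) ---
// One host thread per GPU is assumed: each thread selects its device, copies its slice of the
// input, launches the reduction, and finishes the sum on the CPU. The function name
// reduceOnDevice and the BLOCK_N/THREAD_N values below are illustrative only.
void reduceOnDevice(int device, const float *h_Input, int N, float *h_Sum)
{
    const int BLOCK_N = 32, THREAD_N = 256;
    float *d_Input = NULL, *d_Result = NULL;
    float h_Result[BLOCK_N * THREAD_N];
    cudaSetDevice(device);                               // one CUDA context per CPU thread
    cudaMalloc((void **)&d_Input, N * sizeof(float));
    cudaMalloc((void **)&d_Result, BLOCK_N * THREAD_N * sizeof(float));
    cudaMemcpy(d_Input, h_Input, N * sizeof(float), cudaMemcpyHostToDevice);
    launch_reduceKernel(d_Result, d_Input, N, BLOCK_N, THREAD_N);
    cudaMemcpy(h_Result, d_Result, sizeof(h_Result), cudaMemcpyDeviceToHost);
    *h_Sum = 0;
    for (int i = 0; i < BLOCK_N * THREAD_N; i++)          // final reduction on the host
        *h_Sum += h_Result[i];
    cudaFree(d_Input);
    cudaFree(d_Result);
}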
|
7cc446d56e23d0d598d39890bacbf63a67857aa7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <string.h>
#include <math.h>
#include "lab1.h"
static const int W = 400;
static const int H = 400;
static const unsigned NFRAME = 240;
struct Lab1VideoGenerator::Impl {
int t = 0;
};
Lab1VideoGenerator::Lab1VideoGenerator(): impl(new Impl) {
}
Lab1VideoGenerator::~Lab1VideoGenerator() {}
void Lab1VideoGenerator::get_info(Lab1VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
#define MAXITERATIONS (128)
#define MAX_DWELL (MAXITERATIONS)
#define CUT_DWELL (MAX_DWELL / 4)
cuFloatComplex c0;
__device__ cuFloatComplex juliaFunctor(cuFloatComplex p,cuFloatComplex c){
return cuCaddf(cuCmulf(p,p),c);
}
__device__ int evolveComplexPoint(cuFloatComplex p,cuFloatComplex c){
int it =1;
while(it <= MAXITERATIONS && cuCabsf(p) <=4){
p=juliaFunctor(p,c);
it++;
}
return it;
}
__device__ cuFloatComplex convertToComplex(int x, int y,float zoom,float moveX,float moveY){
float jx = 1.5 * (x - W / 2) / (0.5 * zoom * W) + moveX;
float jy = (y - H / 2) / (0.5 * zoom * H) + moveY;
return make_cuFloatComplex(jx,jy);
}
__global__ void computeJulia(uint8_t* data,uint8_t* dataU,uint8_t* dataV,cuFloatComplex c,float zoom,float moveX,float moveY,int time){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<W && j<H){
cuFloatComplex p = convertToComplex(i,j,zoom,moveX,moveY);
int dwell = evolveComplexPoint(p,c);
int r,g,b;
int divide = 12;
if(dwell >= MAX_DWELL) {
r = 0;
g = 0;
b = 0;
} else {
// cut at zero
if(dwell < 0) {
dwell = 0;
}
if(dwell <= MAX_DWELL/divide) {
r = 255;
g = time + dwell * 255 / (MAX_DWELL/divide);
b = 0;
} else if(dwell <= MAX_DWELL*2/divide && dwell > MAX_DWELL/divide) {
r = 255 - time - (dwell-MAX_DWELL/divide) * 255 / (MAX_DWELL/divide);
g = 255;
b = 0;
} else if(dwell <= MAX_DWELL*3/divide && dwell > MAX_DWELL*2/divide) {
r = 0;
g = 255;
b = time + (dwell-MAX_DWELL*2/divide) * 255 / (MAX_DWELL/divide);
} else if(dwell <= MAX_DWELL*4/divide && dwell > MAX_DWELL*3/divide) {
r = 0;
g = 255 - time - (dwell-MAX_DWELL*3/divide) * 255 / (MAX_DWELL/divide);
b = 255;
} else if(dwell <= MAX_DWELL*5/divide && dwell > MAX_DWELL*4/divide) {
r = time + (dwell-MAX_DWELL*4/divide) * 255 / (MAX_DWELL/divide);
g = 0;
b = 255;
}
else {
r = 255;
g = 0;
b = 255 - time - (dwell-MAX_DWELL*5/divide) * 255 / (MAX_DWELL/divide);
}
}
if(r<0)
r=0;
if(r>255)
r=255;
if(g<0)
g=0;
if(g>255)
g=255;
if(b<0)
b=0;
if(b>255)
b=255;
data[i*H+j] = (uint8_t)(0.299*r+0.587*g+0.114*b);
dataU[i*H+j] = (uint8_t)(-0.169*r-0.331*g+0.5*b+128);
dataV[i*H+j] = (uint8_t)(0.5*r-0.419*g-0.081*b+128);
}
}
void Lab1VideoGenerator::Generate(uint8_t *yuv) {
uint8_t *ddata, *ddataU, *ddataV;
hipMalloc((void **) &ddata, H*W*sizeof(uint8_t));
hipMalloc((void **) &ddataU, H*W*sizeof(uint8_t));
hipMalloc((void **) &ddataV, H*W*sizeof(uint8_t));
int blocksizeXY = 32;
dim3 blocksize(blocksizeXY, blocksizeXY);
int nblockXY = W/blocksize.x + (W%blocksize.x ? 1 : 0);
dim3 nblock( nblockXY , nblockXY );
float incre =0.0000003;//0.00000003
float inci =-0.0009;//-0.00009
float startre=-0.591;//-0.75
float starti=-0.387;//0.09
float zoom=1.0+0.01*(impl->t);//2.0+0.01*(impl->t)
float moveX=0.09*log(1+impl->t);//0.09*log(1+impl->t)
float moveY=0.05*log(1+impl->t);//-0.01*log(1+impl->t)
c0 = make_cuFloatComplex(startre+(impl->t)*incre,starti+(impl->t)*inci);
hipLaunchKernelGGL(( computeJulia), dim3(nblock),dim3(blocksize), 0, 0, ddata,ddataU,ddataV,c0,zoom,moveX,moveY,impl->t);
hipMemcpy(yuv, ddata, H*W, hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
hipMemcpy(yuv+(H*W), ddataU, H*W/4, hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
hipMemcpy(yuv+(H*W)+(H*W)/4, ddataV, H*W/4, hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
	// free the per-frame buffers so repeated calls do not leak device memory
	hipFree(ddata);
	hipFree(ddataU);
	hipFree(ddataV);
	++(impl->t);
}
|
7cc446d56e23d0d598d39890bacbf63a67857aa7.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <string.h>
#include <math.h>
#include "lab1.h"
static const int W = 400;
static const int H = 400;
static const unsigned NFRAME = 240;
struct Lab1VideoGenerator::Impl {
int t = 0;
};
Lab1VideoGenerator::Lab1VideoGenerator(): impl(new Impl) {
}
Lab1VideoGenerator::~Lab1VideoGenerator() {}
void Lab1VideoGenerator::get_info(Lab1VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
#define MAXITERATIONS (128)
#define MAX_DWELL (MAXITERATIONS)
#define CUT_DWELL (MAX_DWELL / 4)
cuFloatComplex c0;
__device__ cuFloatComplex juliaFunctor(cuFloatComplex p,cuFloatComplex c){
return cuCaddf(cuCmulf(p,p),c);
}
__device__ int evolveComplexPoint(cuFloatComplex p,cuFloatComplex c){
int it =1;
while(it <= MAXITERATIONS && cuCabsf(p) <=4){
p=juliaFunctor(p,c);
it++;
}
return it;
}
__device__ cuFloatComplex convertToComplex(int x, int y,float zoom,float moveX,float moveY){
float jx = 1.5 * (x - W / 2) / (0.5 * zoom * W) + moveX;
float jy = (y - H / 2) / (0.5 * zoom * H) + moveY;
return make_cuFloatComplex(jx,jy);
}
__global__ void computeJulia(uint8_t* data,uint8_t* dataU,uint8_t* dataV,cuFloatComplex c,float zoom,float moveX,float moveY,int time){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<W && j<H){
cuFloatComplex p = convertToComplex(i,j,zoom,moveX,moveY);
int dwell = evolveComplexPoint(p,c);
int r,g,b;
int divide = 12;
if(dwell >= MAX_DWELL) {
r = 0;
g = 0;
b = 0;
} else {
// cut at zero
if(dwell < 0) {
dwell = 0;
}
if(dwell <= MAX_DWELL/divide) {
r = 255;
g = time + dwell * 255 / (MAX_DWELL/divide);
b = 0;
} else if(dwell <= MAX_DWELL*2/divide && dwell > MAX_DWELL/divide) {
r = 255 - time - (dwell-MAX_DWELL/divide) * 255 / (MAX_DWELL/divide);
g = 255;
b = 0;
} else if(dwell <= MAX_DWELL*3/divide && dwell > MAX_DWELL*2/divide) {
r = 0;
g = 255;
b = time + (dwell-MAX_DWELL*2/divide) * 255 / (MAX_DWELL/divide);
} else if(dwell <= MAX_DWELL*4/divide && dwell > MAX_DWELL*3/divide) {
r = 0;
g = 255 - time - (dwell-MAX_DWELL*3/divide) * 255 / (MAX_DWELL/divide);
b = 255;
} else if(dwell <= MAX_DWELL*5/divide && dwell > MAX_DWELL*4/divide) {
r = time + (dwell-MAX_DWELL*4/divide) * 255 / (MAX_DWELL/divide);
g = 0;
b = 255;
}
else {
r = 255;
g = 0;
b = 255 - time - (dwell-MAX_DWELL*5/divide) * 255 / (MAX_DWELL/divide);
}
}
if(r<0)
r=0;
if(r>255)
r=255;
if(g<0)
g=0;
if(g>255)
g=255;
if(b<0)
b=0;
if(b>255)
b=255;
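		// RGB -> YCbCr conversion (approximately the BT.601 / JPEG full-range coefficients); U and V are offset by 128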
data[i*H+j] = (uint8_t)(0.299*r+0.587*g+0.114*b);
dataU[i*H+j] = (uint8_t)(-0.169*r-0.331*g+0.5*b+128);
dataV[i*H+j] = (uint8_t)(0.5*r-0.419*g-0.081*b+128);
}
}
void Lab1VideoGenerator::Generate(uint8_t *yuv) {
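	// The output buffer is assumed to be planar YUV420 (I420): W*H bytes of Y, then W*H/4 bytes each of U and V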
uint8_t *ddata, *ddataU, *ddataV;
cudaMalloc((void **) &ddata, H*W*sizeof(uint8_t));
cudaMalloc((void **) &ddataU, H*W*sizeof(uint8_t));
cudaMalloc((void **) &ddataV, H*W*sizeof(uint8_t));
int blocksizeXY = 32;
dim3 blocksize(blocksizeXY, blocksizeXY);
int nblockXY = W/blocksize.x + (W%blocksize.x ? 1 : 0);
dim3 nblock( nblockXY , nblockXY );
float incre =0.0000003;//0.00000003
float inci =-0.0009;//-0.00009
float startre=-0.591;//-0.75
float starti=-0.387;//0.09
float zoom=1.0+0.01*(impl->t);//2.0+0.01*(impl->t)
float moveX=0.09*log(1+impl->t);//0.09*log(1+impl->t)
float moveY=0.05*log(1+impl->t);//-0.01*log(1+impl->t)
c0 = make_cuFloatComplex(startre+(impl->t)*incre,starti+(impl->t)*inci);
computeJulia<<<nblock,blocksize>>>(ddata,ddataU,ddataV,c0,zoom,moveX,moveY,impl->t);
cudaMemcpy(yuv, ddata, H*W, cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
cudaMemcpy(yuv+(H*W), ddataU, H*W/4, cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
cudaMemcpy(yuv+(H*W)+(H*W)/4, ddataV, H*W/4, cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
	// free the per-frame buffers so repeated calls do not leak device memory
	cudaFree(ddata);
	cudaFree(ddataU);
	cudaFree(ddataV);
	++(impl->t);
}
|
7e9e6da9084398fe6bf36da9ebd2af39c17d67fc.hip
|
// !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
*/
#define TILE_WIDTH 16
#define BLOCK_SIZE 16
#define N 2048
__global__ void
matrixMulCUDA(float *C, float *A, float *B, int n)
{
    // each thread computes one TILE_WIDTH x TILE_WIDTH tile of C, so the tile origin
    // is the global thread index scaled by TILE_WIDTH
    int start_row = (blockDim.y * blockIdx.y + threadIdx.y) * TILE_WIDTH;
    int end_row = start_row + TILE_WIDTH;
    int start_col = (blockDim.x * blockIdx.x + threadIdx.x) * TILE_WIDTH;
    int end_col = start_col + TILE_WIDTH;
for (int row = start_row; row < end_row; row++)
{
for (int col = start_col; col < end_col; col++)
{
float C_val = 0;
for (int k = 0; k < n; ++k)
{
float A_elem = A[row * n + k];
float B_elem = B[k * n + col];
C_val += A_elem * B_elem;
}
C[row * n + col] = C_val;
}
}
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int n)
{
// Allocate host memory for matrices A and B
unsigned int size_A = n * n;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = n * n;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
unsigned int mem_size_C = n * n * sizeof(float);
float *h_C = (float *)malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **)&d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **)&d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **)&d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid((((n-1) / BLOCK_SIZE + 1) - 1) / TILE_WIDTH + 1 ,(((n-1) / BLOCK_SIZE + 1) - 1) / TILE_WIDTH + 1, 1);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
hipLaunchKernelGGL(( matrixMulCUDA), dim3(grid), dim3(threads), 0, 0, d_C, d_A, d_B, n);
error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr, "Failed to launch kernel!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
printf("Elapsed time in msec = %f\n", msecTotal);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return EXIT_SUCCESS;
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
// By default, we use device 0
int devID = 0;
hipSetDevice(devID);
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Size of square matrices
size_t n = N;
// printf("[-] N = ");
// scanf("%u", &n);
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", n, n, n, n);
int matrix_result = matrixMultiply(argc, argv, n);
exit(matrix_result);
}
|
7e9e6da9084398fe6bf36da9ebd2af39c17d67fc.cu
|
// System includes
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
*/
#define TILE_WIDTH 16
#define BLOCK_SIZE 16
#define N 2048
__global__ void
matrixMulCUDA(float *C, float *A, float *B, int n)
{
    // each thread computes one TILE_WIDTH x TILE_WIDTH tile of C, so the tile origin
    // is the global thread index scaled by TILE_WIDTH
    int start_row = (blockDim.y * blockIdx.y + threadIdx.y) * TILE_WIDTH;
    int end_row = start_row + TILE_WIDTH;
    int start_col = (blockDim.x * blockIdx.x + threadIdx.x) * TILE_WIDTH;
    int end_col = start_col + TILE_WIDTH;
for (int row = start_row; row < end_row; row++)
{
for (int col = start_col; col < end_col; col++)
{
float C_val = 0;
for (int k = 0; k < n; ++k)
{
float A_elem = A[row * n + k];
float B_elem = B[k * n + col];
C_val += A_elem * B_elem;
}
C[row * n + col] = C_val;
}
}
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int n)
{
// Allocate host memory for matrices A and B
unsigned int size_A = n * n;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = n * n;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
unsigned int mem_size_C = n * n * sizeof(float);
float *h_C = (float *)malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void **)&d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **)&d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **)&d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid((((n-1) / BLOCK_SIZE + 1) - 1) / TILE_WIDTH + 1 ,(((n-1) / BLOCK_SIZE + 1) - 1) / TILE_WIDTH + 1, 1);
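    // i.e. ceil(ceil(n / BLOCK_SIZE) / TILE_WIDTH) blocks per dimension: for n = 2048 this is an
    // 8 x 8 grid of 16 x 16 thread blocks, each thread covering a TILE_WIDTH x TILE_WIDTH tile of C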
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
matrixMulCUDA<<<grid, threads>>>(d_C, d_A, d_B, n);
error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to launch kernel!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
printf("Elapsed time in msec = %f\n", msecTotal);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return EXIT_SUCCESS;
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
// By default, we use device 0
int devID = 0;
cudaSetDevice(devID);
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Size of square matrices
size_t n = N;
// printf("[-] N = ");
// scanf("%u", &n);
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", n, n, n, n);
int matrix_result = matrixMultiply(argc, argv, n);
exit(matrix_result);
}
|
a1de2adca0de158fe09858928be6a06332b66b20.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/remap/forward.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "src/common/rounding_converter.cuh"
#include "src/cuda/cv/kernel_common.cuh"
#include "src/cuda/remap/common.h"
#include "src/cuda/utils.cuh"
using namespace megdnn;
using namespace cuda;
using namespace remap;
using namespace rounding;
namespace {
template <const uint32_t format>
__device__ inline int get_offset(
int height, int width, int channel, int h, int w, int c);
template <>
__device__ inline int get_offset<param_enumv::Remap::Format::NCHW>(
int height, int width, int channel, int h, int w, int c) {
return channel * h * w + height * w + width;
}
template <>
__device__ inline int get_offset<param_enumv::Remap::Format::NHWC>(
int height, int width, int channel, int h, int w, int c) {
return height * w * c + width * c + channel;
}
template <typename ctype, const uint32_t format, ::BorderMode bmode>
struct GetSrcData {
__device__ static inline ctype get(
const ctype* src, int height, int width, int channel, int h, int w, int c,
float) {
height = megcv::border_interpolate<bmode>(height, h);
width = megcv::border_interpolate<bmode>(width, w);
return src[get_offset<format>(height, width, channel, h, w, c)];
}
};
template <typename ctype, const uint32_t format>
struct GetSrcData<ctype, format, ::BorderMode::BORDER_CONSTANT> {
__device__ static inline ctype get(
const ctype* src, int height, int width, int channel, int h, int w, int c,
float scalar) {
RoundingConverter<ctype> round_converter;
return (height >= 0 && height < h && width >= 0 && width < w)
? src[get_offset<format>(height, width, channel, h, w, c)]
: round_converter(scalar);
}
};
template <typename ctype, ::BorderMode bmode>
__global__ void kern_general(
const ctype* __restrict sptr, const float* map_xy, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW, float scalar) {
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
sptr += blockIdx.z * C * IH * IW;
dst += blockIdx.z * C * OH * OW;
map_xy += blockIdx.z * 2 * OH * OW;
RoundingConverter<ctype> round_converter;
if (ow < OW && oh < OH) {
float index_col = map_xy[oh * OW * 2 + ow * 2 + 0];
float index_row = map_xy[oh * OW * 2 + ow * 2 + 1];
int col = static_cast<int>(floor(index_col));
int row = static_cast<int>(floor(index_row));
float v = index_col - col;
float u = index_row - row;
for (int c = 0; c < C; ++c) {
ctype a00 = GetSrcData<ctype, param_enumv::Remap::Format::NCHW, bmode>::get(
sptr, row + 0, col + 0, c, IH, IW, C, scalar);
ctype a01 = GetSrcData<ctype, param_enumv::Remap::Format::NCHW, bmode>::get(
sptr, row + 0, col + 1, c, IH, IW, C, scalar);
ctype a10 = GetSrcData<ctype, param_enumv::Remap::Format::NCHW, bmode>::get(
sptr, row + 1, col + 0, c, IH, IW, C, scalar);
ctype a11 = GetSrcData<ctype, param_enumv::Remap::Format::NCHW, bmode>::get(
sptr, row + 1, col + 1, c, IH, IW, C, scalar);
/* in remap, we use float as the type of intermediate result */
float result = static_cast<float>(a00) * (1.f - u) * (1.f - v) +
static_cast<float>(a01) * (1.f - u) * v +
static_cast<float>(a10) * (1.f - v) * u +
static_cast<float>(a11) * u * v;
dst[get_offset<param_enumv::Remap::Format::NCHW>(oh, ow, c, OH, OW, C)] =
round_converter(result);
}
}
}
template <typename ctype, ::BorderMode bmode>
__global__ void kern_general_nhwc(
const ctype* __restrict sptr, const float* map_xy, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW, float scalar) {
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
sptr += blockIdx.z * C * IH * IW;
dst += blockIdx.z * C * OH * OW;
map_xy += blockIdx.z * 2 * OH * OW;
RoundingConverter<ctype> round_converter;
if (ow < OW && oh < OH) {
float index_col = map_xy[oh * OW * 2 + ow * 2 + 0];
float index_row = map_xy[oh * OW * 2 + ow * 2 + 1];
int col = static_cast<int>(floor(index_col));
int row = static_cast<int>(floor(index_row));
float v = index_col - col;
float u = index_row - row;
for (int c = 0; c < C; ++c) {
ctype a00 = GetSrcData<ctype, param_enumv::Remap::Format::NHWC, bmode>::get(
sptr, row + 0, col + 0, c, IH, IW, C, scalar);
ctype a01 = GetSrcData<ctype, param_enumv::Remap::Format::NHWC, bmode>::get(
sptr, row + 0, col + 1, c, IH, IW, C, scalar);
ctype a10 = GetSrcData<ctype, param_enumv::Remap::Format::NHWC, bmode>::get(
sptr, row + 1, col + 0, c, IH, IW, C, scalar);
ctype a11 = GetSrcData<ctype, param_enumv::Remap::Format::NHWC, bmode>::get(
sptr, row + 1, col + 1, c, IH, IW, C, scalar);
/* in remap, we use float as the type of intermediate result */
float result = static_cast<float>(a00) * (1.f - u) * (1.f - v) +
static_cast<float>(a01) * (1.f - u) * v +
static_cast<float>(a10) * (1.f - v) * u +
static_cast<float>(a11) * u * v;
dst[get_offset<param_enumv::Remap::Format::NHWC>(oh, ow, c, OH, OW, C)] =
round_converter(result);
}
}
}
template <typename ctype, const uint32_t format, ::BorderMode bmode>
void dispatch_forward(
const ctype* src, const float* map_xy, ctype* dst, int N, int C, int IH, int IW,
int OH, int OW, float scalar, hipStream_t stream) {
const int BX = 32, BY = 16;
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
if (format == param_enumv::Remap::Format::NCHW) {
hipLaunchKernelGGL(( kern_general<ctype, bmode>), dim3(blocks), dim3(threads), 0, stream,
src, map_xy, dst, C, IH, IW, OH, OW, scalar);
} else if (format == param_enumv::Remap::Format::NHWC) {
hipLaunchKernelGGL(( kern_general_nhwc<ctype, bmode>), dim3(blocks), dim3(threads), 0, stream,
src, map_xy, dst, C, IH, IW, OH, OW, scalar);
}
N -= curr_batch_size;
src += curr_batch_size * C * IH * IW;
dst += curr_batch_size * C * OH * OW;
map_xy += curr_batch_size * OH * OW * 2;
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace remap {
template <typename ctype, const uint32_t format, ::BorderMode bmode>
void forward_proxy(
const ctype* src, const float* map_xy, ctype* dst, int N, int C, int IH, int IW,
int OH, int OW, float scalar, hipStream_t stream) {
dispatch_forward<ctype, format, bmode>(
src, map_xy, dst, N, C, IH, IW, OH, OW, scalar, stream);
after_kernel_launch();
}
#define INST(ctype, format, bmode) \
template void \
forward_proxy<ctype, param_enumv::Remap::Format::format, ::BorderMode::bmode>( \
const ctype*, const float*, ctype*, int, int, int, int, int, int, float, \
hipStream_t);
#define FOR_FORMAT_BMODE(ctype) \
INST(ctype, NCHW, BORDER_CONSTANT) \
INST(ctype, NCHW, BORDER_REPLICATE) \
INST(ctype, NCHW, BORDER_REFLECT) \
INST(ctype, NCHW, BORDER_REFLECT_101) \
INST(ctype, NCHW, BORDER_WRAP) \
INST(ctype, NHWC, BORDER_CONSTANT) \
INST(ctype, NHWC, BORDER_REPLICATE) \
INST(ctype, NHWC, BORDER_REFLECT) \
INST(ctype, NHWC, BORDER_REFLECT_101) \
INST(ctype, NHWC, BORDER_WRAP)
FOR_FORMAT_BMODE(float)
DNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_float16))
DNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16))
FOR_FORMAT_BMODE(int8_t)
FOR_FORMAT_BMODE(uint8_t)
#undef FOR_FORMAT_BMODE
#undef INST
} // namespace remap
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
a1de2adca0de158fe09858928be6a06332b66b20.cu
|
/**
* \file dnn/src/cuda/remap/forward.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include "src/common/rounding_converter.cuh"
#include "src/cuda/cv/kernel_common.cuh"
#include "src/cuda/remap/common.h"
#include "src/cuda/utils.cuh"
using namespace megdnn;
using namespace cuda;
using namespace remap;
using namespace rounding;
namespace {
template <const uint32_t format>
__device__ inline int get_offset(
int height, int width, int channel, int h, int w, int c);
template <>
__device__ inline int get_offset<param_enumv::Remap::Format::NCHW>(
int height, int width, int channel, int h, int w, int c) {
return channel * h * w + height * w + width;
}
template <>
__device__ inline int get_offset<param_enumv::Remap::Format::NHWC>(
int height, int width, int channel, int h, int w, int c) {
return height * w * c + width * c + channel;
}
template <typename ctype, const uint32_t format, ::BorderMode bmode>
struct GetSrcData {
__device__ static inline ctype get(
const ctype* src, int height, int width, int channel, int h, int w, int c,
float) {
height = megcv::border_interpolate<bmode>(height, h);
width = megcv::border_interpolate<bmode>(width, w);
return src[get_offset<format>(height, width, channel, h, w, c)];
}
};
template <typename ctype, const uint32_t format>
struct GetSrcData<ctype, format, ::BorderMode::BORDER_CONSTANT> {
__device__ static inline ctype get(
const ctype* src, int height, int width, int channel, int h, int w, int c,
float scalar) {
RoundingConverter<ctype> round_converter;
return (height >= 0 && height < h && width >= 0 && width < w)
? src[get_offset<format>(height, width, channel, h, w, c)]
: round_converter(scalar);
}
};
template <typename ctype, ::BorderMode bmode>
__global__ void kern_general(
const ctype* __restrict sptr, const float* map_xy, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW, float scalar) {
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
sptr += blockIdx.z * C * IH * IW;
dst += blockIdx.z * C * OH * OW;
map_xy += blockIdx.z * 2 * OH * OW;
RoundingConverter<ctype> round_converter;
if (ow < OW && oh < OH) {
float index_col = map_xy[oh * OW * 2 + ow * 2 + 0];
float index_row = map_xy[oh * OW * 2 + ow * 2 + 1];
int col = static_cast<int>(floor(index_col));
int row = static_cast<int>(floor(index_row));
float v = index_col - col;
float u = index_row - row;
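        // (v, u) are the fractional offsets inside the source cell; the loop below fetches the four
        // neighbouring pixels a00..a11 and blends them bilinearly with weights (1-u)(1-v), (1-u)v, u(1-v), uv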
for (int c = 0; c < C; ++c) {
ctype a00 = GetSrcData<ctype, param_enumv::Remap::Format::NCHW, bmode>::get(
sptr, row + 0, col + 0, c, IH, IW, C, scalar);
ctype a01 = GetSrcData<ctype, param_enumv::Remap::Format::NCHW, bmode>::get(
sptr, row + 0, col + 1, c, IH, IW, C, scalar);
ctype a10 = GetSrcData<ctype, param_enumv::Remap::Format::NCHW, bmode>::get(
sptr, row + 1, col + 0, c, IH, IW, C, scalar);
ctype a11 = GetSrcData<ctype, param_enumv::Remap::Format::NCHW, bmode>::get(
sptr, row + 1, col + 1, c, IH, IW, C, scalar);
/* in remap, we use float as the type of intermediate result */
float result = static_cast<float>(a00) * (1.f - u) * (1.f - v) +
static_cast<float>(a01) * (1.f - u) * v +
static_cast<float>(a10) * (1.f - v) * u +
static_cast<float>(a11) * u * v;
dst[get_offset<param_enumv::Remap::Format::NCHW>(oh, ow, c, OH, OW, C)] =
round_converter(result);
}
}
}
template <typename ctype, ::BorderMode bmode>
__global__ void kern_general_nhwc(
const ctype* __restrict sptr, const float* map_xy, ctype* __restrict dst, int C,
int IH, int IW, int OH, int OW, float scalar) {
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
sptr += blockIdx.z * C * IH * IW;
dst += blockIdx.z * C * OH * OW;
map_xy += blockIdx.z * 2 * OH * OW;
RoundingConverter<ctype> round_converter;
if (ow < OW && oh < OH) {
float index_col = map_xy[oh * OW * 2 + ow * 2 + 0];
float index_row = map_xy[oh * OW * 2 + ow * 2 + 1];
int col = static_cast<int>(floor(index_col));
int row = static_cast<int>(floor(index_row));
float v = index_col - col;
float u = index_row - row;
for (int c = 0; c < C; ++c) {
ctype a00 = GetSrcData<ctype, param_enumv::Remap::Format::NHWC, bmode>::get(
sptr, row + 0, col + 0, c, IH, IW, C, scalar);
ctype a01 = GetSrcData<ctype, param_enumv::Remap::Format::NHWC, bmode>::get(
sptr, row + 0, col + 1, c, IH, IW, C, scalar);
ctype a10 = GetSrcData<ctype, param_enumv::Remap::Format::NHWC, bmode>::get(
sptr, row + 1, col + 0, c, IH, IW, C, scalar);
ctype a11 = GetSrcData<ctype, param_enumv::Remap::Format::NHWC, bmode>::get(
sptr, row + 1, col + 1, c, IH, IW, C, scalar);
/* in remap, we use float as the type of intermediate result */
float result = static_cast<float>(a00) * (1.f - u) * (1.f - v) +
static_cast<float>(a01) * (1.f - u) * v +
static_cast<float>(a10) * (1.f - v) * u +
static_cast<float>(a11) * u * v;
dst[get_offset<param_enumv::Remap::Format::NHWC>(oh, ow, c, OH, OW, C)] =
round_converter(result);
}
}
}
template <typename ctype, const uint32_t format, ::BorderMode bmode>
void dispatch_forward(
const ctype* src, const float* map_xy, ctype* dst, int N, int C, int IH, int IW,
int OH, int OW, float scalar, cudaStream_t stream) {
const int BX = 32, BY = 16;
const int max_batch_size = 65535;
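    // gridDim.z is capped at 65535, so larger batches are processed in chunks with the
    // src/dst/map pointers advanced after each launch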
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
if (format == param_enumv::Remap::Format::NCHW) {
kern_general<ctype, bmode><<<blocks, threads, 0, stream>>>(
src, map_xy, dst, C, IH, IW, OH, OW, scalar);
} else if (format == param_enumv::Remap::Format::NHWC) {
kern_general_nhwc<ctype, bmode><<<blocks, threads, 0, stream>>>(
src, map_xy, dst, C, IH, IW, OH, OW, scalar);
}
N -= curr_batch_size;
src += curr_batch_size * C * IH * IW;
dst += curr_batch_size * C * OH * OW;
map_xy += curr_batch_size * OH * OW * 2;
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace remap {
template <typename ctype, const uint32_t format, ::BorderMode bmode>
void forward_proxy(
const ctype* src, const float* map_xy, ctype* dst, int N, int C, int IH, int IW,
int OH, int OW, float scalar, cudaStream_t stream) {
dispatch_forward<ctype, format, bmode>(
src, map_xy, dst, N, C, IH, IW, OH, OW, scalar, stream);
after_kernel_launch();
}
#define INST(ctype, format, bmode) \
template void \
forward_proxy<ctype, param_enumv::Remap::Format::format, ::BorderMode::bmode>( \
const ctype*, const float*, ctype*, int, int, int, int, int, int, float, \
cudaStream_t);
#define FOR_FORMAT_BMODE(ctype) \
INST(ctype, NCHW, BORDER_CONSTANT) \
INST(ctype, NCHW, BORDER_REPLICATE) \
INST(ctype, NCHW, BORDER_REFLECT) \
INST(ctype, NCHW, BORDER_REFLECT_101) \
INST(ctype, NCHW, BORDER_WRAP) \
INST(ctype, NHWC, BORDER_CONSTANT) \
INST(ctype, NHWC, BORDER_REPLICATE) \
INST(ctype, NHWC, BORDER_REFLECT) \
INST(ctype, NHWC, BORDER_REFLECT_101) \
INST(ctype, NHWC, BORDER_WRAP)
FOR_FORMAT_BMODE(float)
DNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_float16))
DNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16))
FOR_FORMAT_BMODE(int8_t)
FOR_FORMAT_BMODE(uint8_t)
#undef FOR_FORMAT_BMODE
#undef INST
} // namespace remap
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
ae9bb3dbd06ddd6e93016fcf32258e05409f57d2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void matrixDivisionKernelEW(const float* A, const float* B, float* C, int a, int b) {
int ROW = blockIdx.y*blockDim.y+threadIdx.y;
int COL = blockIdx.x*blockDim.x+threadIdx.x;
if (ROW < a && COL < b) {
C[ROW * b + COL] = A[ROW * b + COL] / B[ROW * b + COL];  // row-major stride is b (the column count) for all three matrices
}
}
|
ae9bb3dbd06ddd6e93016fcf32258e05409f57d2.cu
|
#include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void matrixDivisionKernelEW(const float* A, const float* B, float* C, int a, int b) {
int ROW = blockIdx.y*blockDim.y+threadIdx.y;
int COL = blockIdx.x*blockDim.x+threadIdx.x;
if (ROW < a && COL < b) {
C[ROW * b + COL] = A[ROW * b + COL] / B[ROW * b + COL];  // row-major stride is b (the column count) for all three matrices
}
}
|
2dc2fc4b938908e7d00aed897d6ede39c86a4fe5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <string>
#include <vector>
#include "NCV.hpp"
using namespace std;
//==============================================================================
//
// Error handling helpers
//
//==============================================================================
static void stdDebugOutput(const string &msg)
{
cout << msg;
}
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
void ncvDebugOutput(const string &msg)
{
debugOutputHandler(msg);
}
void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func)
{
debugOutputHandler = func;
}
//==============================================================================
//
// Memory wrappers and helpers
//
//==============================================================================
Ncv32u alignUp(Ncv32u what, Ncv32u alignment)
{
Ncv32u alignMask = alignment-1;
Ncv32u inverseAlignMask = ~alignMask;
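    // e.g. alignUp(13, 16): (13 + 15) & ~15 == 16; alignment must be a power of two for the mask trick to work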
Ncv32u res = (what + alignMask) & inverseAlignMask;
return res;
}
void NCVMemPtr::clear()
{
ptr = NULL;
memtype = NCVMemoryTypeNone;
}
void NCVMemSegment::clear()
{
begin.clear();
size = 0;
}
NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, hipStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
memcpy(dst, src, sz);
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType,
const void *src, Ncv32u srcPitch, NCVMemoryType srcType,
Ncv32u widthbytes, Ncv32u height, hipStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
for (Ncv32u i=0; i<height; i++)
{
memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
//===================================================================
//
// NCVMemStackAllocator class members implementation
//
//===================================================================
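// The stack allocator hands out segments by bumping an internal `begin` pointer and requires
// deallocation in strict LIFO order (dealloc checks seg.begin.ptr == begin - seg.size); when
// constructed with NCVMemoryTypeNone it allocates nothing and only tracks the peak size needed.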
NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
begin(NULL),
end(NULL),
_memType(NCVMemoryTypeNone),
_alignment(alignment),
bReusesMemory(false)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2");
}
NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment, void *reusePtr)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
_memType(memT),
_alignment(alignment)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2");
ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type");
allocBegin = NULL;
if (reusePtr == NULL && capacity != 0)
{
bReusesMemory = false;
switch (memT)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPageable:
allocBegin = (Ncv8u *)malloc(capacity);
break;
default:;
}
}
else
{
bReusesMemory = true;
allocBegin = (Ncv8u *)reusePtr;
}
if (capacity == 0)
{
allocBegin = (Ncv8u *)(0x1);
}
if (!isCounting())
{
begin = allocBegin;
end = begin + capacity;
}
}
NCVMemStackAllocator::~NCVMemStackAllocator()
{
if (allocBegin != NULL)
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction");
if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1)))
{
switch (_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipFree(allocBegin), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostFree(allocBegin), );
break;
case NCVMemoryTypeHostPageable:
free(allocBegin);
break;
default:;
}
}
allocBegin = NULL;
}
}
NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
size = alignUp(static_cast<Ncv32u>(size), this->_alignment);
this->currentSize += size;
this->_maxSize = ::max(this->_maxSize, this->currentSize);
if (!isCounting())
{
size_t availSize = end - begin;
ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY);
}
seg.begin.ptr = begin;
seg.begin.memtype = this->_memType;
seg.size = size;
begin += size;
return NCV_SUCCESS;
}
NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER);
currentSize -= seg.size;
begin -= seg.size;
seg.clear();
ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC);
return NCV_SUCCESS;
}
NcvBool NCVMemStackAllocator::isInitialized(void) const
{
    return (((this->_alignment & (this->_alignment-1)) == 0) && isCounting()) || this->allocBegin != NULL;
}
NcvBool NCVMemStackAllocator::isCounting(void) const
{
return this->_memType == NCVMemoryTypeNone;
}
NCVMemoryType NCVMemStackAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemStackAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemStackAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// NCVMemNativeAllocator class members implementation
//
//===================================================================
NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
_memType(memT),
_alignment(alignment)
{
ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", );
}
NCVMemNativeAllocator::~NCVMemNativeAllocator()
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak");
}
NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
seg.begin.ptr = (Ncv8u *)malloc(size);
break;
default:;
}
this->currentSize += alignUp(static_cast<Ncv32u>(size), this->_alignment);
this->_maxSize = ::max(this->_maxSize, this->currentSize);
seg.begin.memtype = this->_memType;
seg.size = size;
return NCV_SUCCESS;
}
NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(currentSize >= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
currentSize -= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
free(seg.begin.ptr);
break;
default:;
}
seg.clear();
return NCV_SUCCESS;
}
NcvBool NCVMemNativeAllocator::isInitialized(void) const
{
return (this->_alignment != 0);
}
NcvBool NCVMemNativeAllocator::isCounting(void) const
{
return false;
}
NCVMemoryType NCVMemNativeAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemNativeAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemNativeAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// Time and timer routines
//
//===================================================================
typedef struct _NcvTimeMoment NcvTimeMoment;
#if defined(_WIN32) || defined(_WIN64)
#include <Windows.h>
typedef struct _NcvTimeMoment
{
LONGLONG moment, freq;
} NcvTimeMoment;
static void _ncvQueryMoment(NcvTimeMoment *t)
{
QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq));
QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment));
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->moment / t->freq;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
#elif defined(__GNUC__)
#include <sys/time.h>
typedef struct _NcvTimeMoment
{
struct timeval tv;
struct timezone tz;
} NcvTimeMoment;
void _ncvQueryMoment(NcvTimeMoment *t)
{
gettimeofday(& t->tv, & t->tz);
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return ((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000;
}
#endif //#if defined(_WIN32) || defined(_WIN64)
struct _NcvTimer
{
NcvTimeMoment t1, t2;
};
NcvTimer ncvStartTimer(void)
{
struct _NcvTimer *t;
t = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer));
_ncvQueryMoment(&t->t1);
return t;
}
double ncvEndQueryTimerUs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2);
free(t);
return res;
}
double ncvEndQueryTimerMs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2);
free(t);
return res;
}
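//
// Illustrative usage sketch (editorial addition, not part of the original source):
// a timer handle from ncvStartTimer() is consumed, and freed, by either
// ncvEndQueryTimerUs() or ncvEndQueryTimerMs().
//
#if 0
static void exampleTimerUsage(void)
{
    NcvTimer timer = ncvStartTimer();
    // ... host-side work to be timed ...
    double elapsedMs = ncvEndQueryTimerMs(timer); // also frees the handle
    (void)elapsedMs;
}
#endif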
//===================================================================
//
// Operations with rectangles
//
//===================================================================
//from OpenCV
void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights);
NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses,
Ncv32u &numHypotheses,
Ncv32u minNeighbors,
Ncv32f intersectEps,
NCVVector<Ncv32u> *hypothesesWeights)
{
ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable ||
hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
if (hypothesesWeights != NULL)
{
ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable ||
hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
}
if (numHypotheses == 0)
{
return NCV_SUCCESS;
}
std::vector<NcvRect32u> rects(numHypotheses);
memcpy(&rects[0], hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u));
std::vector<Ncv32u> weights;
if (hypothesesWeights != NULL)
{
groupRectangles(rects, minNeighbors, intersectEps, &weights);
}
else
{
groupRectangles(rects, minNeighbors, intersectEps, NULL);
}
numHypotheses = (Ncv32u)rects.size();
if (numHypotheses > 0)
{
memcpy(hypotheses.ptr(), &rects[0], numHypotheses * sizeof(NcvRect32u));
}
if (hypothesesWeights != NULL)
{
memcpy(hypothesesWeights->ptr(), &weights[0], numHypotheses * sizeof(Ncv32u));
}
return NCV_SUCCESS;
}
template <class T>
static NCVStatus drawRectsWrapperHost(T *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
T color)
{
ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects != 0, NCV_SUCCESS);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
for (Ncv32u i=0; i<numRects; i++)
{
NcvRect32u rect = h_rects[i];
if (rect.x < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x] = color;
}
}
if (rect.x+rect.width-1 < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x+rect.width-1] = color;
}
}
if (rect.y < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[rect.y*dstStride+j] = color;
}
}
if (rect.y + rect.height - 1 < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[(rect.y+rect.height-1)*dstStride+j] = color;
}
}
}
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv8u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv32u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;
template <class T>
__global__ void drawRects(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    if (blockId >= numRects * 4)
{
return;
}
NcvRect32u curRect = d_rects[blockId >> 2];
NcvBool bVertical = blockId & 0x1;
NcvBool bTopLeft = blockId & 0x2;
Ncv32u pt0x, pt0y;
if (bVertical)
{
Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
pt0y = curRect.y;
if (pt0x < dstWidth)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptY < pt0y + curRect.height && ptY < dstHeight)
{
d_dst[ptY * dstStride + pt0x] = color;
}
}
}
}
else
{
Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = curRect.x;
pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
if (pt0y < dstHeight)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptX < pt0x + curRect.width && ptX < dstWidth)
{
d_dst[pt0y * dstStride + ptX] = color;
}
}
}
}
}
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color,
hipStream_t cuStream)
{
ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
if (numRects == 0)
{
return NCV_SUCCESS;
}
dim3 grid(numRects * 4);
dim3 block(NUMTHREADS_DRAWRECTS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
hipLaunchKernelGGL(( drawRects<T>), dim3(grid), dim3(block), 0, 0, d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv8u color,
hipStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv32u color,
hipStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
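//
// Illustrative usage sketch (editorial addition, not part of the original source):
// d_img and d_rects are assumed to be device pointers already prepared by the
// caller, e.g. through one of the NCV allocators above; 255 draws white outlines.
//
#if 0
static NCVStatus exampleDrawDetections(Ncv8u *d_img, Ncv32u pitchElements,
                                       Ncv32u width, Ncv32u height,
                                       NcvRect32u *d_rects, Ncv32u numRects,
                                       hipStream_t stream)
{
    return ncvDrawRects_8u_device(d_img, pitchElements, width, height,
                                  d_rects, numRects, 255, stream);
}
#endif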
|
2dc2fc4b938908e7d00aed897d6ede39c86a4fe5.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <string>
#include <vector>
#include "NCV.hpp"
using namespace std;
//==============================================================================
//
// Error handling helpers
//
//==============================================================================
static void stdDebugOutput(const string &msg)
{
cout << msg;
}
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
void ncvDebugOutput(const string &msg)
{
debugOutputHandler(msg);
}
void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func)
{
debugOutputHandler = func;
}
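//
// Illustrative usage sketch (editorial addition, not part of the original source):
// any function matching the NCVDebugOutputHandler signature can be installed to
// redirect NCV diagnostics, e.g. to stderr.
//
#if 0
static void exampleDebugToStderr(const std::string &msg)
{
    std::cerr << msg;
}
// somewhere during initialisation:
// ncvSetDebugOutputHandler(exampleDebugToStderr);
#endif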
//==============================================================================
//
// Memory wrappers and helpers
//
//==============================================================================
Ncv32u alignUp(Ncv32u what, Ncv32u alignment)
{
Ncv32u alignMask = alignment-1;
Ncv32u inverseAlignMask = ~alignMask;
Ncv32u res = (what + alignMask) & inverseAlignMask;
return res;
}
void NCVMemPtr::clear()
{
ptr = NULL;
memtype = NCVMemoryTypeNone;
}
void NCVMemSegment::clear()
{
begin.clear();
size = 0;
}
NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, cudaStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
memcpy(dst, src, sz);
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType,
const void *src, Ncv32u srcPitch, NCVMemoryType srcType,
Ncv32u widthbytes, Ncv32u height, cudaStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
for (Ncv32u i=0; i<height; i++)
{
memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
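//
// Illustrative usage sketch (editorial addition, not part of the original source):
// a plain host-to-device copy through memSegCopyHelper; passing a non-zero stream
// instead of 0 would make the copy asynchronous on that stream.
//
#if 0
static NCVStatus exampleCopyHostToDevice(void *d_dst, const void *h_src, size_t nBytes)
{
    return memSegCopyHelper(d_dst, NCVMemoryTypeDevice,
                            h_src, NCVMemoryTypeHostPageable,
                            nBytes, 0);
}
#endif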
//===================================================================
//
// NCVMemStackAllocator class members implementation
//
//===================================================================
NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
begin(NULL),
end(NULL),
_memType(NCVMemoryTypeNone),
_alignment(alignment),
bReusesMemory(false)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2");
}
NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment, void *reusePtr)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
_memType(memT),
_alignment(alignment)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2");
ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type");
allocBegin = NULL;
if (reusePtr == NULL && capacity != 0)
{
bReusesMemory = false;
switch (memT)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaMallocHost(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPageable:
allocBegin = (Ncv8u *)malloc(capacity);
break;
default:;
}
}
else
{
bReusesMemory = true;
allocBegin = (Ncv8u *)reusePtr;
}
if (capacity == 0)
{
allocBegin = (Ncv8u *)(0x1);
}
if (!isCounting())
{
begin = allocBegin;
end = begin + capacity;
}
}
NCVMemStackAllocator::~NCVMemStackAllocator()
{
if (allocBegin != NULL)
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction");
if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1)))
{
switch (_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaFree(allocBegin), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaFreeHost(allocBegin), );
break;
case NCVMemoryTypeHostPageable:
free(allocBegin);
break;
default:;
}
}
allocBegin = NULL;
}
}
NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
size = alignUp(static_cast<Ncv32u>(size), this->_alignment);
this->currentSize += size;
this->_maxSize = std::max(this->_maxSize, this->currentSize);
if (!isCounting())
{
size_t availSize = end - begin;
ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY);
}
seg.begin.ptr = begin;
seg.begin.memtype = this->_memType;
seg.size = size;
begin += size;
return NCV_SUCCESS;
}
NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER);
currentSize -= seg.size;
begin -= seg.size;
seg.clear();
ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC);
return NCV_SUCCESS;
}
NcvBool NCVMemStackAllocator::isInitialized(void) const
{
return ((this->_alignment & (this->_alignment-1)) == 0) && isCounting() || this->allocBegin != NULL;
}
NcvBool NCVMemStackAllocator::isCounting(void) const
{
return this->_memType == NCVMemoryTypeNone;
}
NCVMemoryType NCVMemStackAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemStackAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemStackAllocator::maxSize(void) const
{
return this->_maxSize;
}
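//
// Illustrative usage sketch (editorial addition, not part of the original source):
// the stack allocator supports a two-pass pattern -- a counting pass with the
// alignment-only constructor (memory type NCVMemoryTypeNone) to measure the peak
// requirement via maxSize(), then a real device allocator of exactly that capacity.
//
#if 0
static void exampleTwoPassAllocation(void)
{
    NCVMemStackAllocator counting(128);
    // ... run the processing pipeline once against 'counting' ...
    size_t required = counting.maxSize();
    NCVMemStackAllocator device(NCVMemoryTypeDevice, required, 128, NULL);
    // ... run the pipeline again against 'device' with real allocations ...
}
#endif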
//===================================================================
//
// NCVMemNativeAllocator class members implementation
//
//===================================================================
NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
_memType(memT),
_alignment(alignment)
{
ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", );
}
NCVMemNativeAllocator::~NCVMemNativeAllocator()
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak");
}
NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaMallocHost(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
seg.begin.ptr = (Ncv8u *)malloc(size);
break;
default:;
}
this->currentSize += alignUp(static_cast<Ncv32u>(size), this->_alignment);
this->_maxSize = std::max(this->_maxSize, this->currentSize);
seg.begin.memtype = this->_memType;
seg.size = size;
return NCV_SUCCESS;
}
NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(currentSize >= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
currentSize -= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaFreeHost(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
free(seg.begin.ptr);
break;
default:;
}
seg.clear();
return NCV_SUCCESS;
}
NcvBool NCVMemNativeAllocator::isInitialized(void) const
{
return (this->_alignment != 0);
}
NcvBool NCVMemNativeAllocator::isCounting(void) const
{
return false;
}
NCVMemoryType NCVMemNativeAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemNativeAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemNativeAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// Time and timer routines
//
//===================================================================
typedef struct _NcvTimeMoment NcvTimeMoment;
#if defined(_WIN32) || defined(_WIN64)
#include <Windows.h>
typedef struct _NcvTimeMoment
{
LONGLONG moment, freq;
} NcvTimeMoment;
static void _ncvQueryMoment(NcvTimeMoment *t)
{
QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq));
QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment));
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->moment / t->freq;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
#elif defined(__GNUC__)
#include <sys/time.h>
typedef struct _NcvTimeMoment
{
struct timeval tv;
struct timezone tz;
} NcvTimeMoment;
void _ncvQueryMoment(NcvTimeMoment *t)
{
gettimeofday(& t->tv, & t->tz);
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return ((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000;
}
#endif //#if defined(_WIN32) || defined(_WIN64)
struct _NcvTimer
{
NcvTimeMoment t1, t2;
};
NcvTimer ncvStartTimer(void)
{
struct _NcvTimer *t;
t = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer));
_ncvQueryMoment(&t->t1);
return t;
}
double ncvEndQueryTimerUs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2);
free(t);
return res;
}
double ncvEndQueryTimerMs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2);
free(t);
return res;
}
//===================================================================
//
// Operations with rectangles
//
//===================================================================
//from OpenCV
void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights);
NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses,
Ncv32u &numHypotheses,
Ncv32u minNeighbors,
Ncv32f intersectEps,
NCVVector<Ncv32u> *hypothesesWeights)
{
ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable ||
hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
if (hypothesesWeights != NULL)
{
ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable ||
hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
}
if (numHypotheses == 0)
{
return NCV_SUCCESS;
}
std::vector<NcvRect32u> rects(numHypotheses);
memcpy(&rects[0], hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u));
std::vector<Ncv32u> weights;
if (hypothesesWeights != NULL)
{
groupRectangles(rects, minNeighbors, intersectEps, &weights);
}
else
{
groupRectangles(rects, minNeighbors, intersectEps, NULL);
}
numHypotheses = (Ncv32u)rects.size();
if (numHypotheses > 0)
{
memcpy(hypotheses.ptr(), &rects[0], numHypotheses * sizeof(NcvRect32u));
}
if (hypothesesWeights != NULL)
{
memcpy(hypothesesWeights->ptr(), &weights[0], numHypotheses * sizeof(Ncv32u));
}
return NCV_SUCCESS;
}
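//
// Illustrative usage sketch (editorial addition, not part of the original source):
// 'hypotheses' is assumed to be a host-resident vector of candidate rectangles and
// 'numDetections' their count; on return numDetections holds the grouped count.
// The minNeighbors and intersectEps values below are arbitrary example settings.
//
#if 0
static NCVStatus exampleGroupDetections(NCVVector<NcvRect32u> &hypotheses,
                                        Ncv32u &numDetections)
{
    return ncvGroupRectangles_host(hypotheses, numDetections,
                                   2 /*minNeighbors*/, 0.2f /*intersectEps*/, NULL);
}
#endif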
template <class T>
static NCVStatus drawRectsWrapperHost(T *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
T color)
{
ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects != 0, NCV_SUCCESS);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
for (Ncv32u i=0; i<numRects; i++)
{
NcvRect32u rect = h_rects[i];
if (rect.x < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x] = color;
}
}
if (rect.x+rect.width-1 < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x+rect.width-1] = color;
}
}
if (rect.y < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[rect.y*dstStride+j] = color;
}
}
if (rect.y + rect.height - 1 < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[(rect.y+rect.height-1)*dstStride+j] = color;
}
}
}
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv8u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv32u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;
template <class T>
__global__ void drawRects(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    if (blockId >= numRects * 4)
{
return;
}
NcvRect32u curRect = d_rects[blockId >> 2];
NcvBool bVertical = blockId & 0x1;
NcvBool bTopLeft = blockId & 0x2;
Ncv32u pt0x, pt0y;
if (bVertical)
{
Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
pt0y = curRect.y;
if (pt0x < dstWidth)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptY < pt0y + curRect.height && ptY < dstHeight)
{
d_dst[ptY * dstStride + pt0x] = color;
}
}
}
}
else
{
Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = curRect.x;
pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
if (pt0y < dstHeight)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptX < pt0x + curRect.width && ptX < dstWidth)
{
d_dst[pt0y * dstStride + ptX] = color;
}
}
}
}
}
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color,
cudaStream_t cuStream)
{
ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
if (numRects == 0)
{
return NCV_SUCCESS;
}
dim3 grid(numRects * 4);
dim3 block(NUMTHREADS_DRAWRECTS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
drawRects<T><<<grid, block>>>(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv8u color,
cudaStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv32u color,
cudaStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
|
37bc1c188d731c6c67ce1acbfb377b6069e01921.hip
|
// !!! This is a file automatically generated by hipify!!!
// Illustrates the main functions used in CUDA to allocate and free memory, and copy it between host and device
#include "../COMMON/commons.cuh"
#define N 10000
#define H2D hipMemcpyHostToDevice
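// Editorial note (sketch only, not the actual definition): CUDA_CHECK is provided by
// ../COMMON/commons.cuh; an error-checking macro of this kind typically looks roughly
// like the following.
#if 0
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        hipError_t err_ = (call);                                             \
        if (err_ != hipSuccess) {                                             \
            fprintf(stderr, "HIP error '%s' at %s:%d\n",                      \
                    hipGetErrorString(err_), __FILE__, __LINE__);             \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)
#endif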
int main(int argc, char **argv){
// Create a host vector in host / cpu memory
float *hv = new float[N];
// Initialize it
for (int i = 0; i < N; i++){
hv[i] = i * sqrt(2);
}
// Declare a pointer that will hold a device memory address
float *dv;
// Allocate memory using this last pointer. Note that the function requires the address of the pointer
CUDA_CHECK(hipMalloc(&dv, N * sizeof(float)));
// Initialize it to 0 (not necessary if it will be copied over, but good practice)
CUDA_CHECK(hipMemset(dv, 0, N * sizeof(float)));
// Copy the array from CPU to GPU (host to device)
CUDA_CHECK(hipMemcpy(dv, hv, N * sizeof(float), hipMemcpyHostToDevice)); // hipMemcpyDefault works also
// Illustration
CUDA_CHECK(hipMemcpy(dv, hv, N * sizeof(float), hipMemcpyDefault)); // works also
CUDA_CHECK(hipMemcpy(dv, hv, N * sizeof(float), H2D)); // using a macro as a shortcut
// Perform some operations on it
// ...
// Get the data back to the host
CUDA_CHECK(hipMemcpy(hv, dv, N * sizeof(float), hipMemcpyDeviceToHost));
// Free useless device memory
CUDA_CHECK(hipFree(dv));
// Synchronize
CUDA_CHECK(hipDeviceSynchronize());
printf("Success\n");
return 0;
}
|
37bc1c188d731c6c67ce1acbfb377b6069e01921.cu
|
// Illustrates the main functions used in CUDA to allocate and free memory, and copy it between host and device
#include "../COMMON/commons.cuh"
#define N 10000
#define H2D cudaMemcpyHostToDevice
int main(int argc, char **argv){
// Create a host vector in host / cpu memory
float *hv = new float[N];
// Initialize it
for (int i = 0; i < N; i++){
hv[i] = i * sqrt(2);
}
// Declare a pointer that will hold a device memory address
float *dv;
// Allocate memory using this last pointer. Note that the function requires the address of the pointer
CUDA_CHECK(cudaMalloc(&dv, N * sizeof(float)));
// Initialize it to 0 (not necessary if it will be copied over, but good practice)
CUDA_CHECK(cudaMemset(dv, 0, N * sizeof(float)));
// Copy the array from CPU to GPU (host to device)
CUDA_CHECK(cudaMemcpy(dv, hv, N * sizeof(float), cudaMemcpyHostToDevice)); // cudaMemcpyDefault works also
// Illustration
CUDA_CHECK(cudaMemcpy(dv, hv, N * sizeof(float), cudaMemcpyDefault)); // works also
CUDA_CHECK(cudaMemcpy(dv, hv, N * sizeof(float), H2D)); // using a macro as a shortcut
// Perform some operations on it
// ...
// Get the data back to the host
CUDA_CHECK(cudaMemcpy(hv, dv, N * sizeof(float), cudaMemcpyDeviceToHost));
// Free useless device memory
CUDA_CHECK(cudaFree(dv));
// Synchronize
CUDA_CHECK(cudaDeviceSynchronize());
printf("Success\n");
return 0;
}
|
c2dc845dd1ec2b4192a6215edee153451774bb10.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// main.cpp
// DS
//
// Created by Shubham Gupta on 31/03/17.
// Copyright 2017 Shubham Gupta. All rights reserved.
// Modified by Utkarsh Aashu Mishra on 5/02/2014
// Copyright 2018 Utkarsh Aashu Mishra. All rights reserved.
#include <stdio.h>
#include <iostream>
#include <cmath>
#include <string.h>
#include <algorithm>
#include <fstream>
#include<sstream>
#include <iomanip>
#include <ctime>
using namespace std;
#define PI 3.1415926535897932
#define DPI 6.283185307179586
#define SPI 1.772453850905516
#define BOLTZ 1.380658e-23
#define AVOG 6.022169e26
void ALLOCATE_GAS();
void HARD_SPHERE();
void ARGON();
void IDEAL_NITROGEN();
void REAL_OXYGEN();
void IDEAL_AIR();
void REAL_AIR();
void HELIUM_ARGON_XENON();
void OXYGEN_HYDROGEN();
void INITIALISE_SAMPLES();
void DERIVED_GAS_DATA();
void SET_INITIAL_STATE_1D();
void MOLECULES_ENTER_1D();
void FIND_CELL_1D(double &,int &,int &);
void FIND_CELL_MB_1D(double &,int &,int &,double &);
void RVELC(double &,double &,double &);
void SROT(int &,double &,double &);
void SVIB(int &,double &,int &, int&);
void SELE(int &,double &,double &);
void CQAX(double&,double &,double&);
void LBS(double,double,double&);
void REFLECT_1D(int&,int,double&);
void RBC(double &, double &, double & , double &, double &,double &);
void AIFX(double & ,double &, double & , double &, double &, double&, double &, double&);
void REMOVE_MOL(int &);
void INDEX_MOLS();
void SAMPLE_FLOW();
void ADAPT_CELLS_1D();
void EXTEND_MNM(double);
void DISSOCIATION();
void ENERGY(int ,double &);
void COLLISIONS();
void SETXT();
void READ_RESTART();
void WRITE_RESTART();
void READ_DATA();
void OUTPUT_RESULTS();
void MOLECULES_MOVE_1D();
class Managed
{
public:
void *operator new(size_t len) {
void *ptr;
hipMallocManaged(&ptr, len);
hipDeviceSynchronize();
return ptr;
}
void operator delete(void *ptr) {
hipDeviceSynchronize();
hipFree(ptr);
}
};
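//
// Illustrative sketch (editorial addition, not part of the original source): any class
// derived from Managed is allocated in unified memory, so the same pointer obtained
// from 'new' can be dereferenced on the host and passed to kernels. The class and
// kernel names below are hypothetical.
//
#if 0
class Params : public Managed
{
public:
    double dt;
    int nsteps;
};
// Params *p = new Params; // goes through hipMallocManaged in Managed::operator new
// p->dt = 1.0e-6; // host write
// someKernel<<<grid, block>>>(p); // the same pointer is valid in device code
// hipDeviceSynchronize(); // before the host reads results back
#endif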
class CALC : public Managed
{
public:
//declares the variables associated with the calculation
int NVER,MVER,IMEG,NREL,MOLSC,ISF,ISAD,ISECS,IGS,IREM,NNC,IMTS,ERROR,NLINE,ICLASS,NCLASS,NMCC,NMI,NMP,ICN;
double FTIME,TLIM,FNUM,DTM,TREF,TSAMP,TOUT,SAMPRAT,OUTRAT,RANF,TOTCOLI,TOTMOVI,TENERGY,DTSAMP,DTOUT,TPOUT,FRACSAM,TOTMOV,TOTCOL,ENTMASS,ENTREM,CPDTM,TPDTM,TNORM,FNUMF;
double *VNMAX,*TDISS,*TRECOMB,*ALOSS,*EME,*AJM;
double **TCOL;
void d_allocate(int x, double*&arr){
hipMallocManaged(&arr, x*sizeof(double));
}
void d_allocate(int x, int y, double**&arr){
hipMallocManaged(&arr, x*sizeof(double));
for(int i =0; i< x; ++i)
hipMallocManaged(&arr[i], y*sizeof(double));
}
//NVER.MVER.NREL the version number
//IMEG the initial number of megabytes to be used by the program
//MOLSC the target number of molecules per sampling cell
//FTIME the flow time
//TLIM the time at which the calculation stops
//FNUM the number of real molecules represented by each simulated molecule
//CPDTM the maximum number of collisions per time step (standard 0.2)
//TPDTM the maximum number of sampling cell transit times of the flow per time step
//TOTMOV total molecule moves
//TOTCOL total collisions
//TDISS(L) dissociations of species L since sample reset
//TRECOMB(L) recombinations of species L since sample reset
//ENTMASS the current entry mass of which a fraction FREM is to be removed
//ENTREM the remainder (always negative) after molecule removal
//VNMAX(L) the maximum normal velocity component of species L
//TCOL species dependent collision counter
//ISF 0,1 for steady, unsteady flow sampling
//ISAD 0,1 to not automatically adapt cells each output interval in unsteady sampling, 1 to automatically adapt
//ISECS 0,1 for no secondary stream,a secondary stream that applies for positive values of x
//IREM data item to set type of molecule removal
//NNC 0 for normal collisions, 1 for nearest neighbor collisions
//IMTS 0 for uniform move time steps, 1 for time steps that vary over the cells, 2 for fixed time steps
//IGS 0 for initial gas, 1 for stream(s) or reference gas
//ICLASS class of flow
//NCLASS the dimension of PX for the class of flow
//NMCC desired number of molecules in a collision cell
//NMI the initial number of molecules
//TNORM normalizing time (may vary e.g. mean collision time , or a transit time)
//ALOSS(L) number of molecules of species L lost in the move routine
//EME(L) number of species L that enter the front boundary
//AJM(L) the adjustment number to allow for negative downstream entry numbers
//NMP the number of molecules at the start of the move routine
//ICN 0 if molecules with ITYPE(2)=4 are not kept constant, 1 to keep molecule number constant
//FNUMF adjustment factor that is applied to automatically generated value
};
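//
// Illustrative sketch (editorial addition, not part of the original source): the
// d_allocate helpers above are intended to size the per-species arrays once the
// number of species is known; the +1 mirrors the 1-based indexing inherited from
// the Fortran original. The exact sizes used by the program are an assumption here.
//
#if 0
static void exampleAllocateCalcArrays(CALC *calc, int MSP)
{
    calc->d_allocate(MSP + 1, calc->VNMAX);          // per-species maximum normal velocity
    calc->d_allocate(MSP + 1, calc->TDISS);          // dissociation counters
    calc->d_allocate(MSP + 1, MSP + 1, calc->TCOL);  // species-pair collision counters
}
#endif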
class MOLECS : public Managed
{
//declares the variables associated with the molecules
public:
int *IPCELL,*IPSP,*ICREF,*IPCP;
int **IPVIB;
void i_allocate(int x, int *&arr){
hipMallocManaged(&arr, x*sizeof(int));
}
void i_allocate(int x, int y, int **&arr){
hipMallocManaged(&arr, x*sizeof(int));
for(int i =0; i< x; ++i)
hipMallocManaged(&arr[i], y*sizeof(int));
}
double **PX,**PV;
double *PTIM,*PROT,*PELE;
void d_allocate(int x, double *&arr){
hipMallocManaged(&arr, x*sizeof(double));
}
void d_allocate(int x, int y, double **&arr){
hipMallocManaged(&arr, x*sizeof(double));
for(int i =0; i< x; ++i){
try{
hipMallocManaged(&arr[i], y*sizeof(double));
}
catch (std::bad_alloc& ba){
std::cerr << "bad_alloc caught: " << ba.what() << '\n';
}
}
}
int NM,MNM;
//PX(1,2 or 3,N) x,y,z position coordinates of molecule N
//PTIM(N) molecule time
//IPSP(N) the molecular species
//IPCELL(N) the collision cell number
//ICREF the cross-reference array (molecule numbers in order of collision cells)
//IPCP(N) the code number of the last collision partner of molecule
//PV(1-3,N) u,v,w velocity components
//PROT(N) rotational energy
//IPVIB(K,N) level of vibrational mode K of molecule N
//PELE(N) electronic energy
//NM number of molecules
//MNM the maximum number of molecules
};
class GAS : public Managed
{
//declares the variables associated with the molecular species and the stream definition
public:
double RMAS,CXSS,RGFS,VMPM,FDEN,FPR,FMA,FPM,CTM;
double FND[3],FTMP[3],FVTMP[3],VFX[3],VFY[3],TSURF[3],FSPEC[3],VSURF[3];
double *ERS,*CR,*TNEX,*PSF,*SLER,*FP;
double **FSP,**SP,**SPR,**SPV,**VMP;
double ***SPM,***SPVM,***ENTR,***QELC,***SPRT;
double ****SPEX,****SPRC,****SPRP;
double *****SPREX;
void d_allocate(int x, double *&arr){
hipMallocManaged(&arr, x*sizeof(double));
}
void d_allocate(int x, int y, double **&arr){
hipMallocManaged(&arr, x*sizeof(double));
for(int i =0; i< x; ++i)
hipMallocManaged(&arr[i], y*sizeof(double));
}
void d_allocate(int x, int y, int z, double***&arr){
hipMallocManaged(&arr, x*sizeof(double));
for (int i = 0; i < x; ++i)
{
hipMallocManaged(&arr[i], y*sizeof(double));
for (int j = 0; j < y; ++j)
hipMallocManaged(&arr[i][j], z*sizeof(double));
}
}
void d_allocate(int x, int y, int z, int w, double ****&arr){
hipMallocManaged(&arr, x*sizeof(double));
for (int i = 0; i < x; ++i)
{
hipMallocManaged(&arr[i], y*sizeof(double));
for (int j = 0; j < y; ++j)
{
hipMallocManaged(&arr[i][j], z*sizeof(double));
for(int k=0; k<z; ++k)
hipMallocManaged(&arr[i][j][k], w*sizeof(double));
}
}
}
void d_allocate(int x, int y, int z, int w, int v, double*****&arr){
hipMallocManaged(&arr, x*sizeof(double));
for (int i = 0; i < x; ++i)
{
hipMallocManaged(&arr[i], y*sizeof(double));
for (int j = 0; j < y; ++j)
{
hipMallocManaged(&arr[i][j], z*sizeof(double));
for(int k=0; k<z; ++k)
{
hipMallocManaged(&arr[i][j][k], w*sizeof(double));
for(int l=0; l<w; ++l)
hipMallocManaged(&arr[i][j][k][l], v*sizeof(double));
}
}
}
}
int MSP,MMVM,MMRM,MNSR,IGAS,MMEX,MEX,MELE,MVIBL;
int *ISP,*ISPV,*NELL;
int **ISPR,**LIS,**LRS,**ISRCD,**ISPRC,**ISPRK,**TREACG,**TREACL,**NSPEX,**NSLEV;
int ***ISPVM,***NEX;
int ****ISPEX;
void i_allocate(int x, int *&arr){
hipMallocManaged(&arr, x*sizeof(int));
}
void i_allocate(int x, int y, int **&arr){
hipMallocManaged(&arr, x*sizeof(int));
for(int i =0; i< x; ++i)
hipMallocManaged(&arr[i], y*sizeof(int));
}
void i_allocate(int x, int y, int z, int ***&arr){
hipMallocManaged(&arr, x*sizeof(int));
for (int i = 0; i < x; ++i)
{
hipMallocManaged(&arr[i], y*sizeof(int));
for (int j = 0; j < y; ++j)
hipMallocManaged(&arr[i][j], z*sizeof(int));
}
}
void i_allocate(int x, int y, int z, int w, int ****&arr){
hipMallocManaged(&arr, x*sizeof(int));
for (int i = 0; i < x; ++i)
{
hipMallocManaged(&arr[i], y*sizeof(int));
for (int j = 0; j < y; ++j)
{
hipMallocManaged(&arr[i][j], z*sizeof(int));
for(int k=0; k<z; ++k)
hipMallocManaged(&arr[i][j][k], w*sizeof(int));
}
}
}
//MSP the number of molecular species
//MMVM the maximum number of vibrational modes of any species
//MEX number of exchange or chain reactions
//MELE the maximum number of electronic states of any molecule
//MVIBL the maximum number of vibrational levels for detailed balance lists
//MMEX the maximum number of exchange reactions involving the same precollision pair of molecules
//MMRM 0 if gas is completely monatomic, 1 if some species have rotation
//MNSR the number of surface reactions
//SP(1,L) the reference diameter of species L
//SP(2,L) the reference temperature of species L
//SP(3,L) the viscosity-temperature power law of species L
//SP(4,L) the reciprocal of the VSS scattering parameter
//SP(5,L) molecular mass of species L
//SP(6,L) the heat of formation at 273 K.
//ISPR(1,L) number of rotational degrees of freedom of species L
//ISPR(2,L) 0,1 for constant, polynomial rotational relaxation collision number
//SPR(1,L) constant rotational relaxation collision number of species L
// or the constant in a second order polynomial in temperature
//SPR(2,L) the coefficient of temperature in the polynomial
//SPR(3,L) the coefficient of temperature squared in the polynomial
//SPM(1,L,M) the reduced mass for species L,M
//SPM(2,L,M) the reference collision cross-section for species L,M
//SPM(3,L,M) the mean value of the viscosity-temperature power law
//SPM(4,L,M) the reference diameter for L,M collisions
//SPM(5,L,M) the reference temperature for species L,M
//SPM(6,L,M) reciprocal of the gamma function of (5/2-w) for species L,M
//SPM(7,L,M) rotational relaxation collision number for species L,M, or const in polynomial
//SPM(8,L,M) reciprocal of VSS scattering parameter
//ISPV(L) the number of vibrational modes
//SPVM(1,K,L) the characteristic vibrational temperature
//SPVM(2,K,L) constant Zv, or reference Zv for mode K
//SPVM(3,K,L) -1. for constant Zv, or reference temperature
//SPVM(4,K,L) the characteristic dissociation temperature
//SPVM(5,K,L) the arbitrary rate reduction factor
//ISPVM(1,K,L) the species code of the first dissociation product
//ISPVM(2,K,L) the species code of the second dissociation product
//NELL(L) the number of electronic levels of species L
//QELC(N,M,L) for up to M levels of form g*exp(-a/T) in the electronic partition function for species L
// N=1 for the degeneracy g
// N=2 for the coefficient a
// N=3 for the ratio of the excitation cross-section to the elastic cross-section
//ISPRC(L,M) the species of the recombined molecule from species L and M
//ISPRK(L,M) the applicable vibrational mode of this species
//SPRC(1,L,M,K) the constant a in the ternary collision volume
//SPRC(2,L,M,K) the temperature exponent b in the ternary collision volume
//SPRT(1,L,M) lower temperature value for SPRP
//SPRT(2,L,M) higher temperature value for SPRP
//SPRP(1,L,M,K) the cumulative dissociation distribution to level K for products L and M at the lower temperature
//SPRP(2,L,M,K) ditto at higher temperature, for application to post-recombination molecule//
//NSPEX(L,M) the number of exchange reactions with L,M as the pre-collision species
//in the following variables, J is the reaction number (1 to NSPEX(L,M))
//ISPEX(J,1,L,M) the species that splits in an exchange reaction
//ISPEX(J,2,L,M) the other pre-reaction species (all ISPEX are set to 0 if no exchange reaction)
//ISPEX(J,3,L,M) the post-reaction molecule that splits in the opposite reaction
//ISPEX(J,4,L,M) the other post-reaction species
//ISPEX(J,5,L,M) the vibrational mode of the molecule that splits
//ISPEX(J,6,L,M) degeneracy of this reaction
//ISPEX(J,7,L,M) the vibrational mode of the molecule that splits
//SPEX(1,J,L,M) the constant a in the reaction probability for the reverse reaction
//SPEX(2,J,L,M) the temperature exponent b in the reaction probability (reverse reaction only)
//SPEX(3,J,L,M) for the heat of reaction
//SPEX(4,J,L,M) the lower temperature for SPREX
//SPEX(5,J,L,M) the higher temperature for SPREX
//SPEX(6,J,L,M) the energy barrier
//SPREX(1,J,L,M,K) at lower temperature, the Jth reverse exchange reaction of L,M cumulative level K vib. dist. of post-reaction molecule
//SPREX(2,J,L,M,K) ditto at higher temperature
//TNEX(N) total number of exchange reaction N
//NEX(N,L,M) the code number of the Nth exchange or chain reaction in L,M collisions
//RMAS reduced mass for single species case
//CXSS reference cross-section for single species case
//RGFS reciprocal of gamma function for single species case
//for the following, J=1 for the reference gas and/or the minimum x boundary, J=2 for the secondary sream at maximum x boundary
//FND(J) stream or reference gas number density
//FTMP(J) stream temperature
//FVTMP(J) the vibrational and any electronic temperature in the freestream
//VFX(J) the x velocity components of the stream
//VFY(J) the y velocity component in the stream
//FSP(N,J)) fraction of species N in the stream
//FMA stream Mach number
//VMP(N,J) most probable molecular velocity of species N at FTMP(J)
//VMPM the maximum value of VMP in stream 1
//ENTR(M,L,K) entry/removal information for species L at K=1 for 1, K=2 for XB(2)
// M=1 number per unit time
// M=2 remainder
// M=3 speed ratio
// M=4 first constant
// M=5 second constant
// M=6 the maximum normal velocity component in the removal zone (> XREM)
//LIS(1,N) the species code of the first incident molecule
//LIS(2,N) the species code of the second incident molecule (0 if none)
//LRS(1,N) the species code of the first reflected molecule
//LRS(2,N) the species code of the second reflected molecule (0 if none)
//LRS(3,N) the species code of the third reflected molecule (0 if none)
//LRS(4,N) the species code of the fourth reflected molecule (0 if none)
//LRS(5,N) the species code of the fifth reflected molecule (0 if none)
//LRS(6,N) the species code of the sixth reflected molecule (0 if none)
//ERS(N) the energy of the reaction (+ve for recombination, -ve for dissociation)
//NSRSP(L) number of surface reactions that involve species L as incident molecule
//ISRCD(N,L) code number of Nth surface reaction with species L as incident molecule
//CTM mean collision time in stream
//FPM mean free path in stream
//FDEN stream 1 density
//FPR stream 1 pressure
//FMA stream 1 Mach number
//RMAS reduced mass for single species case
//CXSS reference cross-section for single species case
//RGFS reciprocal of gamma function for single species case
//CR(L) collision rate of species L
//FP(L) mean free path of species L
//TREACG(N,L) the total number of species L gained from reaction type N=1 for dissociation, 2 for recombination, 3 for forward exchange, 4 for reverse exchange
//TREACL(N,L) the total number of species L lost from reaction type N=1 for dissociation, 2 for recombination, 3 for forward exchange, 4 for reverse exchange
//NSLEV(2,L) 1 exo, 2 endo: vibrational levels to be made up for species L in detailed balance enforcement after reaction
//SLER(L) rotational energy to be made up for species L in detailed balance enforcement after exothermic reaction
};
class OUTPUT : public Managed
{
public:
//declares the variables associated with the sampling and output
int NSAMP,NMISAMP,NOUT,NDISSOC,NRECOMB,NTSAMP;
//int NDISSL[201];
int *NDISSL;
OUTPUT(){
hipMallocManaged(&NDISSL,201*sizeof(int));
};
double TISAMP,XVELS,YVELS,AVDTM;
double *COLLS,*WCOLLS,*CLSEP,*SREAC,*STEMP,*TRANSTEMP,*ROTTEMP,*VIBTEMP,*ELTEMP;
double **VAR,**VARS,**CSSS,**SUMVIB;
double ***CS,***VARSP,***VIBFRAC;
double ****CSS;
void d_allocate(int x, double *&arr){
hipMallocManaged(&arr, x*sizeof(double));
}
void d_allocate(int x, int y, double **&arr){
hipMallocManaged(&arr, x*sizeof(double));
for(int i =0; i< x; ++i)
hipMallocManaged(&arr[i], y*sizeof(double));
}
void d_allocate(int x, int y, int z, double ***&arr){
hipMallocManaged(&arr, x*sizeof(double));
for (int i = 0; i < x; ++i)
{
hipMallocManaged(&arr[i], y*sizeof(double));
for (int j = 0; j < y; ++j)
hipMallocManaged(&arr[i][j], z*sizeof(double));
}
}
void d_allocate(int x, int y, int z, int w, double ****&arr){
hipMallocManaged(&arr, x*sizeof(double));
for (int i = 0; i < x; ++i)
{
hipMallocManaged(&arr[i], y*sizeof(double));
for (int j = 0; j < y; ++j)
{
hipMallocManaged(&arr[i][j], z*sizeof(double));
for(int k=0; k<z; ++k)
hipMallocManaged(&arr[i][j][k], w*sizeof(double));
}
}
}
//NSAMP the number of samples
//TISAMP the time at which the sampling was last reset
//MNISAMP the number of molecules at the last reset
//AVDTM the average value of DTM in the cells
//NOUT the number of output intervals
//COLLS(N) total number of collisions in sampling cell N
//WCOLLS(N) total weighted collisions in N
//CLSEP(N) sum of collision pair separation in cell N
//CS(0,N,L) sampled number of species L in cell N
//CS(1,N,L) sampled weighted number of species L in cell N
//--all the following CS are weighted sums
//CS(2,N,L), CS(3,N,L), CS(4,N,L) sampled sum of u, v, w
//CS(5,N,L), CS(6,N,L), CS(7,N,L) sampled sum of u*u, v*v, w*w
//CS(8,N,L) sampled sum of rotational energy of species L in cell N
//CS(9,N,L) sampled sum of electronic energy of species L in cell N
//CS(9+K,N,L) sampled sum of vibrational level of species L in cell N
// K is the mode
//
//in CSS, M=1 for incident molecules and M=2 for reflected molecules
//J=1 for surface at x=XB(1), 2 for surface at x=XB(2)
//
//CSS(0,J,L,M) number sum of molecules of species L
//CSS(1,J,L,M) weighted number sum of molecules of species L
//--all the following CSS are weighted
//CSS(2,J,L,M) normal momentum sum to surface
//CSS(3,J,L,M) y momentum sum to surface
//CSS(4,J,L,M) z momentum sum to surface
//CSS(5,J,L,M) translational energy sum to surface
//CSS(6,J,L,M) rotational energy sum to surface
//CSS(7,J,L,M) vibrational energy sum to the surface
//CSS(8,J,L,M) electronic energy sum to the surface
//
//CSSS(1,J) weighted sum (over incident AND reflected molecules) of 1/normal vel. component
//--all the following CSSS are weighted
//CSSS(2,J) similar sum of molecular mass / normal vel. component
//CSSS(3,J) similar sum of molecular mass * parallel vel. component / normal vel. component
//CSSS(4,J) similar sum of molecular mass * speed squared / normal vel. component
//CSSS(5,J) similar sum of rotational energy / normal vel. component
//CSSS(6,J) similar sum of rotational degrees of freedom /normal velocity component
//
//SREAC(N) the number of type N surface reactions
//
//VAR(M,N) the flowfield properties in cell N
//M=1 the x coordinate
//M=2 sample size
//M=3 number density
//M=4 density
//M=5 u velocity component
//M=6 v velocity component
//M=7 w velocity component
//M=8 translational temperature
//M=9 rotational temperature
//M=10 vibrational temperature
//M=11 temperature
//M=12 Mach number
//M=13 molecules per cell
//M=14 mean collision time / rate
//M=15 mean free path
//M=16 ratio (mean collisional separation) / (mean free path)
//M=17 flow speed
//M=18 scalar pressure nkT
//M=19 x component of translational temperature TTX
//M=20 y component of translational temperature TTY
//M=21 z component of translational temperature TTZ
//M=22 electronic temperature
//
//VARSP(M,N,L) the flowfield properties for species L in cell N
//M=0 the sample size
//M=1 the fraction
//M=2 the temperature component in the x direction
//M=3 the temperature component in the y direction
//M=4 the temperature component in the z direction
//M=5 the translational temperature
//M=6 the rotational temperature
//M=7 the vibrational temperature
//M=8 the temperature
//M=9 the x component of the diffusion velocity
//M=10 the y component of the diffusion velocity
//M=11 the z component of the diffusion velocity
//M=12 the electronic temperature
//
//VARS(N,M) surface property N on interval L of surface M
//
//N=0 the unweighted sample (remainder of variables are weighted for cyl. and sph. flows)
//N=1 the incident sample
//N=2 the reflected sample
//N=3 the incident number flux
//N=4 the reflected number flux
//N=5 the incident pressure
//N=6 the reflected pressure
//N=7 the incident parallel shear stress
//N=8 the reflected parallel shear stress
//N=9 the incident normal-to-plane shear stress
//N=10 the reflected normal shear stress
//N=11 the incident translational heat flux
//N=12 the reflected translational heat flux
//N=13 the incident rotational heat flux
//N=14 the reflected rotational heat flux
//N=15 the incident vibrational heat flux
//N=16 the reflected vibrational heat flux
//N=17 the incident heat flux from surface reactions
//N=18 the reflected heat flux from surface reactions
//N=19 slip velocity
//N=20 temperature slip
//N=21 rotational temperature slip
//N=22 the net pressure
//N=23 the net parallel in-plane shear
//N=24 the net parallel normal-to-plane shear
//N=25 the net translational energy flux
//N=26 the net rotational heat flux
//N=27 the net vibrational heat flux
//N=28 the heat flux from reactions
//N=29 total incident heat transfer
//N=30 total reflected heat transfer
//N=31 net heat transfer
//N=32 surface temperature --not implemented
//N=33 incident electronic energy
//N=34 reflected electronic energy
//N=35 net electronic energy
//N=35+K the percentage of species K
//
//COLLS(N) the number of collisions in sampling cell N
//WCOLLS(N) weighted number
//CLSEP(N) the total collision partner separation distance in sampling cell N
//
//VIBFRAC(L,K,M) the sum of species L mode K in level M
//SUMVIB(L,K) the total sample in VIBFRAC
//
//THE following variables apply in the sampling of distribution functions
//(some are especially for the dissociation of oxygen
//
//NDISSOC the number of dissociations
//NRECOMB the number of recombinations
//NDISSL(L) the number of dissociations from level
//NTSAMP the number of temperature samples
//STEMP(L) the temperature of species L
//TRANSTEMP(L) the translational temperature of species N
//ROTTEMP(L) rotational temperature of species N
//VIBTEMP(L) vibrational temperature of species N
//ELTEMP(L) electronic temperature of species N
//
};
class GEOM_1D : public Managed
{
public:
//declares the variables associated with the flowfield geometry and cell structure
//for homogeneous gas and one-dimensional flow studies
int NCELLS,NCCELLS,NCIS,NDIV,MDIV,ILEVEL,IFX,JFX,IVB,IWF;
//int ITYPE[3];
int *ITYPE;
int *ICELL;
int ** ICCELL,**JDIV;
void i_allocate(int x, int *&arr){
hipMallocManaged(&arr, x*sizeof(int));
}
void i_allocate(int x, int y, int **&arr){
hipMallocManaged(&arr, x*sizeof(int));
for(int i =0; i< x; ++i)
hipMallocManaged(&arr[i], y*sizeof(int));
}
double DDIV,XS,VELOB,WFM,AWF,FREM,XREM;
//double XB[3];
double *XB;
double **CELL,**CCELL;
void d_allocate(int x, int y, double**&arr){
hipMallocManaged(&arr, x*sizeof(double));
for(int i =0; i< x; ++i)
hipMallocManaged(&arr[i], y*sizeof(double));
}
GEOM_1D(){
hipMallocManaged(&ITYPE, 3*sizeof(int));
hipMallocManaged(&XB, 3*sizeof(double));
}
//
//XB(1), XB(2) the minimum, maximum x coordinate
//DDIV the width of a division
//ITYPE(K) the type of boundary at the minimum x (K=1) and maximum x (K=2) boundaries
// 0 for a stream boundary
// 1 for a plane of symmetry
// 2 for a solid surface
// 3 for a vacuum
//NCELLS the number of sampling cells
//NCCELLS the number of collision cells
//NCIS the number of collision cells in a sampling cell
// MDIV the maximum number of sampling cell divisions at any level of subdivision
//IVB 0,1 for stationary, moving outer boundary
//IWF 0 for no radial weighting factors, 1 for radial weighting factors
//WFM, set in data as the maximum weighting factor, then divided by the maximum radius
//AWF overall ratio of real to weighted molecules
//VELOB the speed of the outer boundary
//ILEVEL level of subdivision in adaption (0 before adaption)
//JDIV(N,M) (-cell number) or (start address -1 in JDIV(N+1,M), where M is MDIV
//IFX 0 for plane flow, 1 for cylindrical flow, 3 for spherical flow
//JFX IFX+1
//CELL(M,N) information on sampling cell N
// M=1 x coordinate
// M=2 minimum x coordinate
// M=3 maximum x coordinate
// M=4 volume
//ICELL(N) number of collision cells preceding those in sampling cell N
//CCELL(M,N) information on collision cell N
// M=1 volume
// M=2 remainder in collision counting
// M=3 half the allowed time step
// M=4 maximum value of product of cross-section and relative velocity
// M=5 collision cell time
//ICCELL(M,N) integer information on collision cell N
// M=1 the (start address -1) in ICREF of molecules in collision cell N
// M=2 the number of molecules in collision cell N
// M=3 the sampling cell in which the collision cell lies
//FREM fraction of molecule removal
//XREM the coordinate at which removal commences
//
};
clock_t start;
fstream file_9;
fstream file_18;
CALC *calc = new CALC;
GAS *gas = new GAS;
MOLECS *molecs = new MOLECS;
GEOM_1D *geom = new GEOM_1D;
OUTPUT *output =new OUTPUT;
template <typename T>
string to_string(T value)
{
std::ostringstream os ;
os << value ;
return os.str() ;
}
int main()
{
// //CALC calc;
// //MOLECS molecs;
// //GAS gas;
// //OUTPUT output;
// //GEOM_1D geom;
//
// IMPLICIT NONE
//
int IRUN,ICONF,N,M,IADAPT,IRETREM,ISET;
double A;
//
fstream file_7;
calc->NVER=1; //for major changes, e.g. to basic architecture
calc->MVER=1 ; //significant changes, but must change whenever the data in a DSnD.DAT file changes
calc->NREL=1 ; //the release number
//
//***********************
//set constants
// PI=3.1415926535897932D00
// DPI=6.283185307179586D00
// SPI=1.772453850905516D00
// BOLTZ=1.380658D-23
// AVOG=6.022169D26
//***********************
//
//*************************************************
//**** ADJUSTABLE COMPUTATIONAL PARAMETERS ****
//*************************************************
//
calc->NMCC=15; //DEFAULT=15--desired number of simulated molecules in a collision cell
//
calc->CPDTM=0.2; //DEFAULT=0.2--fraction of the local mean collision time that is the desired maximum time step
//
calc->TPDTM=0.5 ; //DEFAULT=0.5--the fraction or multiple of a sampling cell transit time that is the desired maximum time step
//
calc->NNC=1; //DEFAULT=0--0 to select collision partner randomly from collision cell, 1 for nearest-neighbor collisions
//
calc->SAMPRAT=5; //DEFAULT=5--the number of time steps in a sampling interval
//
calc->OUTRAT=10; //50 //DEFAULT=50--the number of flow samples in a file output interval
//
calc->FRACSAM=0.5; //0.5 //DEFAULT=0.5--fraction of the output interval over which a time-averaged sample is taken in an unsteady flow
//
calc->ISAD=0; //DEFAULT=0--0,1 to not adapt, to adapt cells automatically at start of output interval in an unsteady flow (not yet implemented)
//
calc->IMTS=2; //DEFAULT=0--0 to set the move time step to the instantaneous overall time step that changes with time
// 1 to use a cell dependent collision time
// 2 to keep the time step fixed at the initial value
//
calc->FNUMF=1; //DEFAULT=1--adjustment factor to the automatically generated value for the number of real molecules
// that are represented by each simulated molecule.
// (The adjustment may be large because the automatic setting assumes that the whole flowfield is at the stream conditions.)
//
//automatic adjustments may be applied for some application classes (e.g. homogeneous gas studies)
//
calc->TLIM=1.e-5; //DEFAULT=1.D20 sets an indefinite run - set if a definite STOP time is required
//
//************************************************
//
//open a diagnostic file and check whether an instance of the program is already running
//
// fstream file_9;
cout<<"DSMC PROGRAM"<<endl;
file_9.open("DIAG.TXT", ios::trunc | ios::out);
if(file_9.is_open()){
file_9<<"File DIAG.TXT has been opened"<<endl;
cout<<"File DIAG.TXT has been opened"<<endl;
}
else{
cout<<"Stop the DS1.EXE that is already running and try again"<<endl;
//return 0;
}
// OPEN (9,FILE='DIAG.TXT',FORM='FORMATTED',STATUS='REPLACE')
// WRITE (9,*,IOSTAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'Stop the DS1.EXE that is already running and try again'
// STOP
// ELSE
// WRITE (9,*) 'File DIAG.TXT has been opened'
// END IF
//
//open a molecule number history file
//OPEN (13,FILE='MolNum.DAT',FORM='FORMATTED',STATUS='REPLACE')
//
//initialise run variables
IRUN=0;
geom->IVB=0; //will be reset to 1 by restart program if there is a moving wall
//
while((IRUN < 1) || (IRUN > 2)){
cout<< "DSMC Version" <<calc->NVER<<'.'<<calc->MVER<<'.'<<calc->NREL<<endl;
cout<< "enter 1 to continue a current run"<<endl;
cout<< "enter 2 to start a new run :-"<<endl;
//
cin>> IRUN;
}
if(IRUN == 1) file_9<< "Continuing an existing run"<<endl;//WRITE (9,*) 'Continuing an existing run'
if(IRUN == 2) {
cout<< "Enter 1 to confirm, 0 to continue current run :-"<<endl;
cin>> ICONF;
if(ICONF == 1)
file_9<<"Starting a new run"<<endl;//WRITE (9,*) 'Starting a new run'
else{
IRUN=1;
file_9<<"Continuing an existing run"<<endl;
//WRITE (9,*) 'Continuing an existing run'
}
}
//
if(IRUN == 2){ //new run
cout<< "Enter 0 for a homogeneous gas, or"<<endl;
cout<< "Enter 1 for a one-dimensional flow, or"<<endl;
cout<< "Enter 2 for a two-dimensional plane flow, or"<<endl;
cout<< "Enter 3 for a three dimensional flow, or"<<endl;
cout<< "enter 4 for an axially-symmetric flow :-"<<endl;
cin>> calc->ICLASS;
calc->NCLASS=2; //default 2D
if(calc->ICLASS < 2) calc->NCLASS=1; //0D or 1D
if(calc->ICLASS == 3) calc->NCLASS=3; //3D
cout<<"Enter 0 for an eventually steady flow, or"<<endl;
cout<<"enter 1 for a continuing unsteady flow :-"<<endl;
cin>> calc->ISF;
file_7.open("RUN_CLASS.TXT", ios::trunc |ios::out);
if(file_7.is_open()){
cout<<"RUN_CLASS.TXT is opened"<<endl;
}
else{
cout<<"RUN_CLASS.TXT not opened"<<endl;
cin.get();
}
file_7<<calc->ICLASS<<calc->ISF;
file_7.close();
// OPEN (7,FILE='RUN_CLASS.TXT',FORM='FORMATTED',STATUS='REPLACE')
// WRITE (7,*) ICLASS,ISF
// CLOSE (7)
file_9<<"Starting a new run with ICLASS, ISF "<<calc->ICLASS<<" "<<calc->ISF<<endl;
// WRITE (9,*) 'Starting a new run with ICLASS, ISF',ICLASS,ISF
cout<<"Starting a new run with ICLASS, ISF "<<calc->ICLASS<<" "<<calc->ISF<<endl;
}
//
if(IRUN == 1){ //continued run
file_7.open("RUN_CLASS.TXT" , ios::in );
if(file_7.is_open()){
cout<<"RUN_CLASS.TXT is opened"<<endl;
}
else{
cout<<"RUN_CLASS.TXT not opened"<<endl;
cin.get();
}
file_7 >>calc->ICLASS>>calc->ISF;
file_7.close();
// OPEN (7,FILE='RUN_CLASS.TXT',FORM='FORMATTED',STATUS='OLD')
// READ (7,*) ICLASS,ISF
// CLOSE(7)
READ_RESTART();
//
calc->TSAMP=calc->FTIME+calc->DTSAMP;
calc->TOUT=calc->FTIME+calc->DTOUT;
if((gas->MEX > 0) && (calc->ISF == 1)){
cout<<"Enter 0 to continue the reaction sample or"<<endl;
cout<<"enter 1 to continue with a new reaction sample :-"<<endl;
cin>> N;
if(N == 1){
//memset(gas->TNEX,0.e00,sizeof(*gas->TNEX));
//memset(calc->TDISS,0.e00,sizeof(*calc->TDISS));
//memset(calc->TRECOMB,0.e00,sizeof(*calc->TRECOMB));
for(int i=0;i<gas->MEX+1;i++)
gas->TNEX[i]= 0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->TDISS[i]=0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->TRECOMB[i]=0.e00;
}
}
//
if((calc->ISAD == 0) && (calc->ISF == 0)){
cout<<"Enter 0 to continue the current sample or"<<endl;
cout<<"enter 1 to continue with a new sample :-"<<endl;
cin>> N;
if(N == 1){
if((geom->ITYPE[2] == 4) && (calc->ICN == 0)){
cout<<"Enter 0 to continue to not enforce constant molecule number"<<endl;
cout<<"enter 1 to start to enforce constant molecule number :-"<<endl;
cin>> M;
if(M == 1) calc->ICN=1;
}
cout<<"Enter 1 to adapt the cells, or 0 to continue with current cells:-"<<endl;
cin>>IADAPT;
if(IADAPT == 1){
cout<<"Adapting cells"<<endl;
ADAPT_CELLS_1D() ;
INDEX_MOLS();
WRITE_RESTART();
}
else
cout<<"Continuing with existing cells"<<endl;
//
if(calc->IREM == 2){
cout<<"Enter 1 to reset the removal details, or 0 to continue with current details:-"<<endl;
cin>>IRETREM;
if(IRETREM == 1){
geom->FREM=-1.e00;
while((geom->FREM < -0.0001) || (geom->FREM > 5.0)){
cout<<"Enter the fraction of entering molecules that are removed:-"<<endl;
cin>>geom->FREM;
cout<<"The ratio of removed to entering mlecules is \t"<<geom->FREM<<endl;
// WRITE (*,999) FREM
}
file_9<<"The ratio of removed to entering mlecules is \t"<<geom->FREM<<endl;
// WRITE (9,999) FREM
// 999 FORMAT (' The ratio of removed to entering molecules is ',G15.5)
if(geom->FREM > 1.e-10){
geom->XREM=geom->XB[1]-1.0;
while((geom->XREM < geom->XB[1]-0.0001) || (geom->XREM > geom->XB[2]+0.0001)){
cout<<"Enter x coordinate of the upstream removal limit:-"<<endl;
cin>>geom->XREM;
cout<<"The molecules are removed from \t"<<geom->XREM<<" to "<<geom->XB[2]<<endl; //988
// WRITE (*,998) XREM,XB(2)
}
file_9<<"The molecules are removed from \t"<<geom->XREM<<" to "<<geom->XB[2]<<endl;
// WRITE (9,998) XREM,XB(2)
// 998 FORMAT (' The molecules are removed from ',G15.5,' to',G15.5)
}
}
}
//
INITIALISE_SAMPLES();
}
}
}
//
if(IRUN == 2){
//
READ_DATA();
//
if(calc->ICLASS < 2) SET_INITIAL_STATE_1D();
//
if(calc->ICLASS == 0) ENERGY(0,A);
//
WRITE_RESTART();
//
}
//
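//Main time-marching loop: each pass advances FTIME by DTM, moves the molecules, injects any
//molecules entering through stream boundaries, re-indexes the molecules into cells, computes
//collisions, and then takes flow samples and writes the restart and output files at the
//prescribed intervals.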
while(calc->FTIME < calc->TLIM){
//
//
calc->FTIME=calc->FTIME+calc->DTM;
//
file_9<<" TIME "<<setw(20)<<setprecision(10)<<calc->FTIME<<" NM "<<molecs->NM<<" COLLS "<<std::left<<setw(20)<<setprecision(10)<<calc->TOTCOL<<endl;
// WRITE (9,*) 'TIME',FTIME,' NM',NM,' COLLS',TOTCOL
cout<< " TIME "<<setw(20)<<setprecision(10)<<calc->FTIME<<" NM "<<molecs->NM<<" COLLS "<<std::left<<setw(20)<<setprecision(10)<<calc->TOTCOL<<endl;
//
// WRITE (13,*) FTIME/TNORM,FLOAT(NM)/FLOAT(NMI) //uncomment if a MOLFILE.DAT is to be generated
//
// WRITE (*,*) 'MOVE'
//cout<<"MOVE"<<endl;
MOLECULES_MOVE_1D();
//
if((geom->ITYPE[1] == 0) || (geom->ITYPE[2] == 0) || (geom->ITYPE[2] == 4)) MOLECULES_ENTER_1D();
//
// WRITE (*,*) 'INDEX'
//cout<<"INDEX"<<endl;
// cout<<calc->TOUT<<endl;
// cin.get();
INDEX_MOLS();
//
// WRITE (*,*) 'COLLISIONS'
COLLISIONS();
//
// if(gas->MMVM > 0) {
// cout<<"DISSOCIATION"<<endl;
// DISSOCIATION();
// }
//
if(calc->FTIME > calc->TSAMP){
// WRITE (*,*) 'SAMPLE'
if(calc->ISF == 0) SAMPLE_FLOW();
if((calc->ISF == 1) && (calc->FTIME < calc->TPOUT+(1.e00-calc->FRACSAM)*calc->DTOUT)){
calc->TSAMP=calc->TSAMP+calc->DTSAMP;
INITIALISE_SAMPLES();
}
if((calc->ISF == 1) && (calc->FTIME >= calc->TPOUT+(1.e00-calc->FRACSAM)*calc->DTOUT)) SAMPLE_FLOW();
}
//
if(calc->FTIME > calc->TOUT){
cout<<"writing OUTPUT"<<endl;
// WRITE (*,*) 'OUTPUT'
WRITE_RESTART();
//
OUTPUT_RESULTS();
calc->TPOUT=calc->FTIME;
}
//
}
return 0;
//
}
void ALLOCATE_GAS()
{
// //GAS gas;
// //CALC calc;
gas->d_allocate(gas->MSP+1,3,gas->FSP);
gas->d_allocate(7,gas->MSP+1,gas->SP);
gas->d_allocate(4,gas->MSP+1,gas->SPR);
gas->d_allocate(9,gas->MSP+1,gas->MSP+1,gas->SPM); //SPM(8,MSP,MSP) in the Fortran, so both species dimensions need MSP+1
gas->i_allocate(3,gas->MSP+1,gas->ISPR);
gas->i_allocate(gas->MSP+1,gas->ISPV);
gas->d_allocate(7,gas->MSP+1,3,gas->ENTR);
gas->d_allocate(gas->MSP+1,3,gas->VMP);
calc->d_allocate(gas->MSP+1,calc->VNMAX);
gas->d_allocate(gas->MSP+1,gas->CR);
calc->d_allocate(gas->MSP+1,gas->MSP+1,calc->TCOL);
gas->i_allocate(gas->MSP+1,gas->MSP+1,gas->ISPRC);
gas->i_allocate(gas->MSP+1,gas->MSP+1,gas->ISPRK);
gas->d_allocate(5,gas->MSP+1,gas->MSP+1,gas->MSP+1,gas->SPRC);
gas->i_allocate(gas->MSP+1,gas->NELL);
gas->d_allocate(4,gas->MELE+1,gas->MSP+1,gas->QELC);
gas->d_allocate(3,gas->MSP+1,gas->MSP+1,gas->MVIBL+1,gas->SPRP);
gas->d_allocate(3,gas->MSP+1,gas->MSP+1,gas->SPRT);
calc->d_allocate(gas->MSP+1,calc->AJM);
gas->d_allocate(gas->MSP+1,gas->FP);
calc->d_allocate(gas->MSP+1,calc->ALOSS);
calc->d_allocate(gas->MSP+1,calc->EME);
/*ALLOCATE (FSP(MSP,2),SP(6,MSP),SPR(3,MSP),SPM(8,MSP,MSP),ISPR(2,MSP),ISPV(MSP),ENTR(6,MSP,2), &
VMP(MSP,2),VNMAX(MSP),CR(MSP),TCOL(MSP,MSP),ISPRC(MSP,MSP),ISPRK(MSP,MSP),SPRC(4,MSP,MSP,MSP), &
NELL(MSP),QELC(3,MELE,MSP),SPRP(2,MSP,MSP,0:MVIBL),SPRT(2,MSP,MSP),AJM(MSP),FP(MSP), &
ALOSS(MSP),EME(MSP),STAT=ERROR)
//
IF (ERROR /= 0) THEN
WRITE (*,*)'PROGRAM COULD NOT ALLOCATE SPECIES VARIABLES',ERROR
END IF
//*/
gas->i_allocate(gas->MMEX+1,gas->MSP+1,gas->MSP+1,gas->NEX);
gas->i_allocate(gas->MSP+1,gas->MSP+1,gas->NSPEX);
gas->d_allocate(7,gas->MMEX+1,gas->MSP+1,gas->MSP+1,gas->SPEX);
gas->i_allocate(gas->MMEX+1,8,gas->MSP+1,gas->MSP+1,gas->ISPEX);
gas->i_allocate(5,gas->MSP+1,gas->TREACG);
gas->d_allocate(gas->MMEX+1,gas->PSF);
gas->i_allocate(5,gas->MSP+1,gas->TREACL);
gas->d_allocate(gas->MEX+1,gas->TNEX);
gas->d_allocate(3,gas->MMEX+1,gas->MSP+1,gas->MSP+1,gas->MVIBL+1,gas->SPREX);
gas->i_allocate(3,gas->MSP+1,gas->NSLEV);
gas->d_allocate(gas->MSP+1,gas->SLER);
// ALLOCATE (NEX(MMEX,MSP,MSP),NSPEX(MSP,MSP),SPEX(6,MMEX,MSP,MSP),ISPEX(MMEX,7,MSP,MSP),TREACG(4,MSP), &
// PSF(MMEX),TREACL(4,MSP),TNEX(MEX),SPREX(2,MMEX,MSP,MSP,0:MVIBL),NSLEV(2,MSP),SLER(MSP),STAT=ERROR)
// //
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT ALLOCATE Q-K REACTION VARIABLES',ERROR
// END IF
// //
if(gas->MMVM >= 0){
gas->d_allocate(6,gas->MMVM+1,gas->MSP+1,gas->SPVM);
gas->i_allocate(3,gas->MMVM+1,gas->MSP+1,gas->ISPVM);
calc->d_allocate(gas->MSP+1,calc->TDISS);
calc->d_allocate(gas->MSP+1,calc->TRECOMB);
//ALLOCATE (SPVM(5,MMVM,MSP),ISPVM(2,MMVM,MSP),TDISS(MSP),TRECOMB(MSP),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT ALLOCATE VIBRATION VARIABLES',ERROR
}
//N.B. surface reactions are not yet implemented
if(gas->MNSR > 0){
gas->d_allocate(gas->MNSR+1,gas->ERS);
gas->i_allocate(3,gas->MNSR+1,gas->LIS);
gas->i_allocate(7,gas->MNSR+1,gas->LRS);
gas->i_allocate(gas->MNSR+1,gas->MSP+1,gas->ISRCD);
//ALLOCATE (ERS(MNSR),LIS(2,MNSR),LRS(6,MNSR),ISRCD(MNSR,MSP),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT ALLOCATE SURFACE REACTION VARIABLES',ERROR
}
//calc->AJM=0.e00;
//memset(calc->AJM,0.e00,sizeof(*calc->AJM));
for(int i=0;i<gas->MSP+1;i++){
calc->AJM[i]=0.e00;
}
return;
}
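//Added note: every dimension above is allocated one element larger than the extent in the
//commented Fortran ALLOCATE statements, so the original 1-based Fortran indices can be used
//unchanged and element [0] is simply left as unused padding, e.g.
//    gas->SP[1][L] ... gas->SP[6][L] for species L=1..MSP (SP[0][*] and SP[*][0] are never read).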
void HARD_SPHERE()
{
////GAS gas;
////CALC calc;
cout<<"Reading HARD_SPHERE Data"<<endl;
gas->MSP=1;
gas->MMRM=0;
gas->MMVM=0;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=1;
gas->MVIBL=0;
ALLOCATE_GAS();
gas->SP[1][1]=4.0e-10; //reference diameter
gas->SP[2][1]=273.0; //reference temperature
gas->SP[3][1]=0.5; //viscosity-temperature index
gas->SP[4][1]=1.0; //reciprocal of VSS scattering parameter (1 for VHS)
gas->SP[5][1]=5.e-26; //mass
gas->ISPR[1][1]=0; //number of rotational degrees of freedom
cout<<"Hard Sphere data done"<<endl;
return;
}
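//Added note (interpretation of the fields set above, following the usual VHS/VSS convention in
//DSMC codes): SP(1,L) and SP(2,L) are the reference diameter and reference temperature, SP(3,L)
//is the viscosity-temperature exponent and SP(4,L) the reciprocal VSS parameter (1 recovers VHS).
//A hedged sketch of the reference cross-section such codes typically build from these values:
//    double sigma_ref=PI*gas->SP[1][1]*gas->SP[1][1];   //about 5.03e-19 m^2 for a 4.0e-10 m diameter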
void ARGON()
{
// //GAS gas;
// //CALC calc;
cout<<"Reading Argon Data"<<endl;
gas->MSP=1;
gas->MMRM=0;
gas->MMVM=0;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=1;
gas->MVIBL=0;
ALLOCATE_GAS();
gas->SP[1][1]=4.17e-10;
gas->SP[2][1]=273.15;
gas->SP[3][1]=0.81;
gas->SP[4][1]=1.0;
gas->SP[5][1]=6.63e-26;
gas->ISPR[1][1]=0;
gas->ISPR[2][1]=0;
cout<<"Argon Data done"<<endl;
return;
}
//
void IDEAL_NITROGEN()
{
// //GAS gas;
// //CALC calc;
cout<<"Reading IDEAL_NITROGEN data"<<endl;
gas->MSP=1;
gas->MMRM=1;
gas->MMVM=0;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=0;
gas->MVIBL=0;
ALLOCATE_GAS();
gas->SP[1][1]=4.17e-10;
gas->SP[2][1]=273.0;
gas->SP[3][1]=0.74;
gas->SP[4][1]=1.0;
gas->SP[5][1]=4.65e-26;
gas->ISPR[1][1]=2;
gas->ISPR[2][1]=0;
gas->SPR[1][1]=5.0;
return;
}
//
void REAL_OXYGEN()
{
//
//GAS gas;
//CALC calc;
cout<<"Reading Real_Oxygen data"<<endl;
gas->MSP=2;
gas->MMRM=1;
gas->MMVM=1;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=5;
gas->MVIBL=26;
ALLOCATE_GAS();
gas->SP[1][1]=4.07e-10;
gas->SP[2][1]=273.00;
gas->SP[3][1]=0.77e00;
gas->SP[4][1]=1.e00;
gas->SP[5][1]=5.312e-26;
gas->SP[6][1]=0.e00;
gas->ISPR[1][1]=2;
gas->ISPR[2][1]=0 ; //0,1 for constant,polynomial rotational relaxation collision number
gas->SPR[1][1]=5.0; // the collision number or the coefficient of temperature in the polynomial (if a polynomial, the coefficient of T^2 is in SPR(3,L))
gas->ISPV[1]=1 ; // the number of vibrational modes
gas->SPVM[1][1][1]=2256.e00 ; // the characteristic vibrational temperature
gas->SPVM[2][1][1]=90000.e00; // a constant Zv, or the reference Zv
gas->SPVM[3][1][1]=2256.e00; // -1 for a constant Zv, or the reference temperature
gas->SPVM[5][1][1]=1.0; //arbitrary reduction factor
gas->ISPVM[1][1][1]=2;
gas->ISPVM[2][1][1]=2;
gas->NELL[1]=3;
if(gas->MELE > 1){
//******
gas->QELC[1][1][1]=3.0;
gas->QELC[2][1][1]=0.0;
gas->QELC[3][1][1]=50.0; //500.
gas->QELC[1][2][1]=2.0;
gas->QELC[2][2][1]=11393.0;
gas->QELC[3][2][1]=50.0; //500 //for equipartition, the cross-section ratios must be the same for all levels
gas->QELC[1][3][1]=1.0;
gas->QELC[2][3][1]=18985.0;
gas->QELC[3][3][1]=50.0; //500.
}
//
//species 2 is atomic oxygen
gas->SP[1][2]=3.e-10;
gas->SP[2][2]=273.e00;
gas->SP[3][2]=0.8e00;
gas->SP[4][2]=1.e00;
gas->SP[5][2]=2.656e-26;
gas->SP[6][2]=4.099e-19;
gas->ISPR[1][2]=0;
gas->ISPV[2]=0; //must be set//
//set electronic information
if(gas->MELE > 1){
gas->NELL[2]=5;
gas->QELC[1][1][2]=5.0;
gas->QELC[2][1][2]=0.0;
gas->QELC[3][1][2]=50.0;
gas->QELC[1][2][2]=3.0;
gas->QELC[2][2][2]=228.9;
gas->QELC[3][2][2]=50.0;
gas->QELC[1][3][2]=1.0;
gas->QELC[2][3][2]=325.9;
gas->QELC[3][3][2]=50.0;
gas->QELC[1][4][2]=5.0;
gas->QELC[2][4][2]=22830.0;
gas->QELC[3][4][2]=50.0;
gas->QELC[1][5][2]=1.0;
gas->QELC[2][5][2]=48621.0;
gas->QELC[3][5][2]=50.0;
}
//set data needed for recombination
//
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->ISPRC[i][j]=0;
gas->ISPRK[i][j]=0;
}
}
// gas->ISPRC=0;
// gas->ISPRK=0;
gas->ISPRC[2][2]=1; //O+O -> O2 recombined species code for an O+O recombination
gas->ISPRK[2][2]=1 ; //the relevant vibrational mode of this species
gas->SPRC[1][2][2][1]=0.04;
gas->SPRC[2][2][2][1]=-1.3;
gas->SPRC[1][2][2][2]=0.05;
gas->SPRC[2][2][2][2]=-1.1;
gas->SPRT[1][2][2]=5000.e00;
gas->SPRT[2][2][2]=15000.e00;
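//Added note (assumption, not from the original comments): SPRC(1,..) and SPRC(2,..) appear to be
//the coefficient and temperature exponent of the recombination rate for each third-body species
//(last index), tabulated at the two temperatures SPRT(1,2,2)=5000 K and SPRT(2,2,2)=15000 K so
//that the rate can be interpolated between them.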
//
//memset(gas->NSPEX,0,sizeof(**gas->NSPEX));
//memset(gas->SPEX,0.e00,sizeof(****gas->SPEX));
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->NSPEX[i][j]=0;
}
}
for(int i=0;i<7;i++){
for(int j=0;j<gas->MMEX+1;j++){
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<gas->MSP+1;l++)
gas->SPEX[i][j][k][l]=0.e00;
}
}
}
//gas->SPEX=0.e00;
//zero ISPEX element-wise (assigning 0 to the pointer itself would discard the allocation)
for(int i=0;i<gas->MMEX+1;i++) for(int j=0;j<8;j++) for(int k=0;k<gas->MSP+1;k++) for(int l=0;l<gas->MSP+1;l++) gas->ISPEX[i][j][k][l]=0;
//
DERIVED_GAS_DATA();
//
cout<<"Real_Oxygen data done"<<endl;
return;
}
//
void IDEAL_AIR()
{
//GAS gas;
//CALC calc;
cout<<"Reading IDEAL_AIR data"<<endl;
gas->MSP=2;
gas->MMRM=1;
gas->MMVM=0;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=1;
gas->MVIBL=0;
//
ALLOCATE_GAS();
//
gas->SP[1][1]=4.07e-10;
gas->SP[2][1]=273.0;
gas->SP[3][1]=0.77;
gas->SP[4][1]=1.0;
gas->SP[5][1]=5.312e-26;
gas->ISPR[1][1]=2;
gas->ISPR[2][1]=0;
gas->SPR[1][1]=5.0;
gas->SP[1][2]=4.17e-10;
gas->SP[2][2]=273.0;
gas->SP[3][2]=0.74;
gas->SP[4][2]=1.0;
gas->SP[5][2]=4.65e-26;
gas->ISPR[1][2]=2;
gas->ISPR[2][2]=0;
gas->SPR[1][2]=5.0;
cout<<"IDEAL_AIR data done"<<endl;
return;
}
//
void REAL_AIR()
{
//GAS gas;
//CALC calc;
cout<<"REAL_AIR data done"<<endl;
gas->MSP=5;
gas->MMRM=1;
gas->MMVM=1;
gas->MELE=5;
gas->MVIBL=40; //?
//
gas->MEX=4;
gas->MMEX=1;
//
gas->MNSR=0;
ALLOCATE_GAS();
//species 1 is oxygen
gas->SP[1][1]=4.07e-10;
gas->SP[2][1]=273.e00;
gas->SP[3][1]=0.77e00;
gas->SP[4][1]=1.e00;
gas->SP[5][1]=5.312e-26;
gas->SP[6][1]=0.e00;
gas->ISPR[1][1]=2;
gas->ISPR[2][1]=0;
gas->SPR[1][1]=5.e00;
gas->ISPV[1]=1; // the number of vibrational modes
gas->SPVM[1][1][1]=2256.e00; // the characteristic vibrational temperature
gas->SPVM[2][1][1]=18000.e00; //90000.D00 // a constant Zv, or the reference Zv
gas->SPVM[3][1][1]=2256.e00; // -1 for a constant Zv, or the reference temperature
gas->SPVM[5][1][1]=1.0;
gas->ISPVM[1][1][1]=3;
gas->ISPVM[2][1][1]=3;
gas->NELL[1]=3;
gas->QELC[1][1][1]=3.0;
gas->QELC[2][1][1]=0.0;
gas->QELC[3][1][1]=50.0;
gas->QELC[1][2][1]=2.0;
gas->QELC[2][2][1]=11393.0;
gas->QELC[3][2][1]=50.0;
gas->QELC[1][3][1]=1.0;
gas->QELC[2][3][1]=18985.0;
gas->QELC[3][3][1]=50.0;
//species 2 is nitrogen
gas->SP[1][2]=4.17e-10;
gas->SP[2][2]=273.e00;
gas->SP[3][2]=0.74e00;
gas->SP[4][2]=1.e00;
gas->SP[5][2]=4.65e-26;
gas->SP[6][2]=0.e00;
gas->ISPR[1][2]=2;
gas->ISPR[2][2]=0;
gas->SPR[1][2]=5.e00;
gas->ISPV[2]=1;
gas->SPVM[1][1][2]=3371.e00;
gas->SPVM[2][1][2]=52000.e00; //260000.D00
gas->SPVM[3][1][2]=3371.e00;
gas->SPVM[5][1][2]=0.3;
gas->ISPVM[1][1][2]=4;
gas->ISPVM[2][1][2]=4;
gas->NELL[2]=1;
gas->QELC[1][1][2]=1.0;
gas->QELC[2][1][2]=0.0;
gas->QELC[3][1][2]=100.0;
//species 3 is atomic oxygen
gas->SP[1][3]=3.e-10;
gas->SP[2][3]=273.e00;
gas->SP[3][3]=0.8e00;
gas->SP[4][3]=1.e00;
gas->SP[5][3]=2.656e-26;
gas->SP[6][3]=4.099e-19;
gas->ISPR[1][3]=0;
gas->ISPV[3]=0;
gas->NELL[3]=5;
gas->QELC[1][1][3]=5.0;
gas->QELC[2][1][3]=0.0;
gas->QELC[3][1][3]=50.0;
gas->QELC[1][2][3]=3.0;
gas->QELC[2][2][3]=228.9;
gas->QELC[3][2][3]=50.0;
gas->QELC[1][3][3]=1.0;
gas->QELC[2][3][3]=325.9;
gas->QELC[3][3][3]=50.0;
gas->QELC[1][4][3]=5.0;
gas->QELC[2][4][3]=22830.0;
gas->QELC[3][4][3]=50.0;
gas->QELC[1][5][3]=1.0;
gas->QELC[2][5][3]=48621.0;
gas->QELC[3][5][3]=50.0;
//species 4 is atomic nitrogen
gas->SP[1][4]=3.e-10;
gas->SP[2][4]=273.e00;
gas->SP[3][4]=0.8e00;
gas->SP[4][4]=1.0e00;
gas->SP[5][4]=2.325e-26;
gas->SP[6][4]=7.849e-19;
gas->ISPR[1][4]=0;
gas->ISPV[4]=0;
gas->NELL[4]=3;
gas->QELC[1][1][4]=4.0;
gas->QELC[2][1][4]=0.0;
gas->QELC[3][1][4]=50.0;
gas->QELC[1][2][4]=10.0;
gas->QELC[2][2][4]=27658.0;
gas->QELC[3][2][4]=50.0;
gas->QELC[1][3][4]=6.0;
gas->QELC[2][3][4]=41495.0;
gas->QELC[3][3][4]=50.0;
//species 5 is NO
gas->SP[1][5]=4.2e-10;
gas->SP[2][5]=273.e00;
gas->SP[3][5]=0.79e00;
gas->SP[4][5]=1.0e00;
gas->SP[5][5]=4.98e-26;
gas->SP[6][5]=1.512e-19;
gas->ISPR[1][5]=2;
gas->ISPR[2][5]=0;
gas->SPR[1][5]=5.e00;
gas->ISPV[5]=1;
gas->SPVM[1][1][5]=2719.e00;
gas->SPVM[2][1][5]=14000.e00; //70000.D00
gas->SPVM[3][1][5]=2719.e00;
gas->SPVM[5][1][5]=0.2;
gas->ISPVM[1][1][5]=3;
gas->ISPVM[2][1][5]=4;
gas->NELL[5]=2;
gas->QELC[1][1][5]=2.0;
gas->QELC[2][1][5]=0.0;
gas->QELC[3][1][5]=50.0;
gas->QELC[1][2][5]=2.0;
gas->QELC[2][2][5]=174.2;
gas->QELC[3][2][5]=50.0;
//set the recombination data for the molecule pairs
//memset(gas->ISPRC,0,sizeof(**gas->ISPRC));//gas->ISPRC=0; //data os zero unless explicitly set
//memset(gas->ISPRK,0,sizeof(**gas->ISPRK));//gas->ISPRK=0;
//memset(gas->SPRC,0,sizeof(****gas->SPRC));//gas->SPRC=0.e00;
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->ISPRC[i][j]=0;
}
}
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->ISPRK[i][j]=0;
}
}
for(int i=0;i<5;i++){
    for(int j=0;j<gas->MSP+1;j++){
        for(int k=0;k<gas->MSP+1;k++){
            for(int l=0;l<gas->MSP+1;l++)
                gas->SPRC[i][j][k][l]=0.e00; //zero SPRC (first extent 5, matching the commented memset above); SPEX is zeroed separately below
        }
    }
}
gas->ISPRC[3][3]=1; //O+O -> O2 recombined species code for an O+O recombination
gas->ISPRK[3][3]=1;
gas->SPRC[1][3][3][1]=0.04e00;
gas->SPRC[2][3][3][1]=-1.3e00;
gas->SPRC[1][3][3][2]=0.07e00;
gas->SPRC[2][3][3][2]=-1.2e00;
gas->SPRC[1][3][3][3]=0.08e00;
gas->SPRC[2][3][3][3]=-1.2e00;
gas->SPRC[1][3][3][4]=0.09e00;
gas->SPRC[2][3][3][4]=-1.2e00;
gas->SPRC[1][3][3][5]=0.065e00;
gas->SPRC[2][3][3][5]=-1.2e00;
gas->SPRT[1][3][3]=5000.e00;
gas->SPRT[2][3][3]=15000.e00;
gas->ISPRC[4][4]=2; //N+N -> N2
gas->ISPRK[4][4]=1;
gas->SPRC[1][4][4][1]=0.15e00;
gas->SPRC[2][4][4][1]=-2.05e00;
gas->SPRC[1][4][4][2]=0.09e00;
gas->SPRC[2][4][4][2]=-2.1e00;
gas->SPRC[1][4][4][3]=0.16e00;
gas->SPRC[2][4][4][3]=-2.0e00;
gas->SPRC[1][4][4][4]=0.17e00;
gas->SPRC[2][4][4][4]=-2.0e00;
gas->SPRC[1][4][4][5]=0.17e00;
gas->SPRC[2][4][4][5]=-2.1e00;
gas->SPRT[1][4][4]=5000.e00;
gas->SPRT[2][4][4]=15000.e00;
gas->ISPRC[3][4]=5;
gas->ISPRK[3][4]=1;
gas->SPRC[1][3][4][1]=0.3e00;
gas->SPRC[2][3][4][1]=-1.9e00;
gas->SPRC[1][3][4][2]=0.4e00;
gas->SPRC[2][3][4][2]=-2.0e00;
gas->SPRC[1][3][4][3]=0.3e00;
gas->SPRC[2][3][4][3]=-1.75e00;
gas->SPRC[1][3][4][4]=0.3e00;
gas->SPRC[2][3][4][4]=-1.75e00;
gas->SPRC[1][3][4][5]=0.15e00;
gas->SPRC[2][3][4][5]=-1.9e00;
gas->SPRT[1][3][4]=5000.e00;
gas->SPRT[2][3][4]=15000.e00;
//set the exchange reaction data
//memset(gas->SPEX,0,sizeof(****gas->SPEX));//gas->SPEX=0.e00;
for(int i=0;i<7;i++){
for(int j=0;j<gas->MMEX+1;j++){
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<gas->MSP+1;l++)
gas->SPEX[i][j][k][l]=0.e00;
}
}
}
//zero ISPEX and NSPEX element-wise (setting the pointers to 0 would discard the allocations)
for(int i=0;i<gas->MMEX+1;i++) for(int j=0;j<8;j++) for(int k=0;k<gas->MSP+1;k++) for(int l=0;l<gas->MSP+1;l++) gas->ISPEX[i][j][k][l]=0;
for(int i=0;i<gas->MSP+1;i++) for(int j=0;j<gas->MSP+1;j++) gas->NSPEX[i][j]=0;
gas->NSPEX[2][3]=1;
gas->NSPEX[4][5]=1;
gas->NSPEX[3][5]=1;
gas->NSPEX[1][4]=1;
//N2+O->NO+N
gas->ISPEX[1][1][2][3]=2;
gas->ISPEX[1][2][2][3]=3;
gas->ISPEX[1][3][2][3]=5;
gas->ISPEX[1][4][2][3]=4;
gas->ISPEX[1][5][2][3]=1;
gas->ISPEX[1][6][2][3]=1;
gas->SPEX[6][1][2][3]=0.e00;
gas->NEX[1][2][3]=1;
//NO+N->N2+O
gas->ISPEX[1][1][4][5]=5;
gas->ISPEX[1][2][4][5]=4;
gas->ISPEX[1][3][4][5]=2;
gas->ISPEX[1][4][4][5]=3;
gas->ISPEX[1][5][4][5]=1;
gas->ISPEX[1][6][4][5]=1;
gas->ISPEX[1][7][4][5]=1;
gas->SPEX[1][1][4][5]=0.8e00;
gas->SPEX[2][1][4][5]=-0.75e00;
gas->SPEX[4][1][4][5]=5000.e00;
gas->SPEX[5][1][4][5]=15000.e00;
gas->SPEX[6][1][4][5]=0.e00;
gas->NEX[1][4][5]=2;
//NO+O->O2+N
gas->ISPEX[1][1][3][5]=5;
gas->ISPEX[1][2][3][5]=3;
gas->ISPEX[1][3][3][5]=1;
gas->ISPEX[1][4][3][5]=4;
gas->ISPEX[1][5][3][5]=1;
gas->ISPEX[1][6][3][5]=1;
gas->SPEX[6][1][3][5]=2.e-19;
gas->NEX[1][3][5]=3;
//O2+N->NO+O
gas->ISPEX[1][1][1][4]=1;
gas->ISPEX[1][2][1][4]=4;
gas->ISPEX[1][3][1][4]=5;
gas->ISPEX[1][4][1][4]=3;
gas->ISPEX[1][5][1][4]=1;
gas->ISPEX[1][6][1][4]=1;
gas->ISPEX[1][7][1][4]=1 ;
gas->SPEX[1][1][1][4]=7.e00;
gas->SPEX[2][1][1][4]=-0.85e00;
gas->SPEX[4][1][1][4]=5000.e00;
gas->SPEX[5][1][1][4]=15000.e00;
gas->SPEX[6][1][1][4]=0.e00;
gas->NEX[1][1][4]=4;
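//Added note on the layout used above (a pattern inferred from these four reactions, so treat it
//as an assumption): ISPEX(J,1..2,L,M) name the pre-collision pair, ISPEX(J,3..4,L,M) the
//post-collision pair and ISPEX(J,5..7,L,M) flag the vibrational modes involved, while
//SPEX(1..2,..) look like a rate coefficient and temperature exponent, SPEX(4..5,..) the two
//reference temperatures, and SPEX(6,..) an activation energy (2.e-19 J for NO+O->O2+N, zero
//for the others).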
DERIVED_GAS_DATA();
cout<<"REAL_AIR data done"<<endl;
return;
}
//
void HELIUM_ARGON_XENON()
{
//GAS gas;
//CALC calc;
cout<<"Reading HELIUM_ARGON_XENON data"<<endl;
gas->MSP=3;
gas->MMRM=0;
gas->MMVM=0;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=1;
gas->MVIBL=0;
ALLOCATE_GAS();
gas->SP[1][1]=2.30e-10; //2.33D-10
gas->SP[2][1]=273.0;
gas->SP[3][1]=0.66;
gas->SP[4][1]=0.794; //1.
gas->SP[5][1]=6.65e-27;
gas->ISPR[1][1]=0;
gas->ISPR[2][1]=0;
//
gas->SP[1][2]=4.11e-10; //4.17D-10
gas->SP[2][2]=273.15;
gas->SP[3][2]=0.81;
gas->SP[4][2]=0.714; //1.
gas->SP[5][2]=6.63e-26;
gas->ISPR[1][2]=0;
gas->ISPR[2][2]=0;
//
gas->SP[1][3]=5.65e-10; //5.74D-10
gas->SP[2][3]=273.0;
gas->SP[3][3]=0.85;
gas->SP[4][3]=0.694; //1.
gas->SP[5][3]=21.8e-26;
gas->ISPR[1][3]=0;
gas->ISPR[2][3]=0;
cout<<"HELIUM_ARGON_XENON data done"<<endl;
return;
}
//
void OXYGEN_HYDROGEN()
{
//
//GAS gas;
//CALC calc;
cout<<"Reading OXYGEN_HYDROGEN data"<<endl;
gas->MSP=8;
gas->MMRM=3;
gas->MMVM=3;
gas->MELE=1;
gas->MVIBL=40; //the maximum number of vibrational levels before a cumulative level reaches 1
//
gas->MEX=16;
gas->MMEX=3;
//
gas->MNSR=0;
//
ALLOCATE_GAS();
//
//species 1 is hydrogen H2
gas->SP[1][1]=2.92e-10;
gas->SP[2][1]=273.e00;
gas->SP[3][1]=0.67e00;
gas->SP[4][1]=1.e00;
gas->SP[5][1]=3.34e-27;
gas->SP[6][1]=0.e00;
gas->ISPR[1][1]=2;
gas->ISPR[2][1]=0;
gas->SPR[1][1]=5.e00;
gas->ISPV[1]=1; // the number of vibrational modes
gas->SPVM[1][1][1]=6159.e00; // the characteristic vibrational temperature
gas->SPVM[2][1][1]=20000.e00; //estimate
gas->SPVM[3][1][1]=2000.e00; //estimate
gas->SPVM[5][1][1]=1.0;
gas->ISPVM[1][1][1]=2;
gas->ISPVM[2][1][1]=2;
//species 2 is atomic hydrogen H
gas->SP[1][2]=2.5e-10; //estimate
gas->SP[2][2]=273.e00;
gas->SP[3][2]=0.8e00;
gas->SP[4][2]=1.e00;
gas->SP[5][2]=1.67e-27;
gas->SP[6][2]=3.62e-19;
gas->ISPR[1][2]=0;
gas->ISPV[2]=0;
//species 3 is oxygen O2
gas->SP[1][3]=4.07e-10;
gas->SP[2][3]=273.e00;
gas->SP[3][3]=0.77e00;
gas->SP[4][3]=1.e00;
gas->SP[5][3]=5.312e-26;
gas->SP[6][3]=0.e00;
gas->ISPR[1][3]=2;
gas->ISPR[2][3]=0;
gas->SPR[1][3]=5.e00;
gas->ISPV[3]=1; // the number of vibrational modes
gas->SPVM[1][1][3]=2256.e00; // the characteristic vibrational temperature
gas->SPVM[2][1][3]=18000.e00; //90000.D00 // a constant Zv, or the reference Zv
gas->SPVM[3][1][3]=2256.e00; // -1 for a constant Zv, or the reference temperature
gas->SPVM[5][1][3]=1.e00;
gas->ISPVM[1][1][3]=4;
gas->ISPVM[2][1][3]=4;
//species 4 is atomic oxygen O
gas->SP[1][4]=3.e-10; //estimate
gas->SP[2][4]=273.e00;
gas->SP[3][4]=0.8e00;
gas->SP[4][4]=1.e00;
gas->SP[5][4]=2.656e-26;
gas->SP[6][4]=4.099e-19;
gas->ISPR[1][4]=0;
gas->ISPV[4]=0;
//species 5 is hydroxy OH
gas->SP[1][5]=4.e-10; //estimate
gas->SP[2][5]=273.e00;
gas->SP[3][5]=0.75e00; //-estimate
gas->SP[4][5]=1.0e00;
gas->SP[5][5]=2.823e-26;
gas->SP[6][5]=6.204e-20;
gas->ISPR[1][5]=2;
gas->ISPR[2][5]=0;
gas->SPR[1][5]=5.e00;
gas->ISPV[5]=1;
gas->SPVM[1][1][5]=5360.e00;
gas->SPVM[2][1][5]=20000.e00; //estimate
gas->SPVM[3][1][5]=2500.e00; //estimate
gas->SPVM[5][1][5]=1.0e00;
gas->ISPVM[1][1][5]=2;
gas->ISPVM[2][1][5]=4;
//species 6 is water vapor H2O
gas->SP[1][6]=4.5e-10; //estimate
gas->SP[2][6]=273.e00;
gas->SP[3][6]=0.75e00 ; //-estimate
gas->SP[4][6]=1.0e00;
gas->SP[5][6]=2.99e-26;
gas->SP[6][6]=-4.015e-19;
gas->ISPR[1][6]=3;
gas->ISPR[2][6]=0;
gas->SPR[1][6]=5.e00;
gas->ISPV[6]=3;
gas->SPVM[1][1][6]=5261.e00; //symmetric stretch mode
gas->SPVM[2][1][6]=20000.e00; //estimate
gas->SPVM[3][1][6]=2500.e00; //estimate
gas->SPVM[5][1][6]=1.e00;
gas->SPVM[1][2][6]=2294.e00; //bend mode
gas->SPVM[2][2][6]=20000.e00; //estimate
gas->SPVM[3][2][6]=2500.e00; //estimate
gas->SPVM[5][2][6]=1.0e00;
gas->SPVM[1][3][6]=5432.e00; //asymmetric stretch mode
gas->SPVM[2][3][6]=20000.e00; //estimate
gas->SPVM[3][3][6]=2500.e00 ; //estimate
gas->SPVM[5][3][6]=1.e00;
gas->ISPVM[1][1][6]=2;
gas->ISPVM[2][1][6]=5;
gas->ISPVM[1][2][6]=2;
gas->ISPVM[2][2][6]=5;
gas->ISPVM[1][3][6]=2;
gas->ISPVM[2][3][6]=5;
//species 7 is hydroperoxy HO2
gas->SP[1][7]=5.5e-10; //estimate
gas->SP[2][7]=273.e00;
gas->SP[3][7]=0.75e00 ; //-estimate
gas->SP[4][7]=1.0e00;
gas->SP[5][7]=5.479e-26;
gas->SP[6][7]=2.04e-20;
gas->ISPR[1][7]=2; //assumes that HO2 is linear
gas->ISPR[2][7]=0;
gas->SPR[1][7]=5.e00;
gas->ISPV[7]=3;
gas->SPVM[1][1][7]=4950.e00;
gas->SPVM[2][1][7]=20000.e00; //estimate
gas->SPVM[3][1][7]=2500.e00 ; //estimate
gas->SPVM[5][1][7]=1.e00;
gas->SPVM[1][2][7]=2000.e00;
gas->SPVM[2][2][7]=20000.e00; //estimate
gas->SPVM[3][2][7]=2500.e00; //estimate
gas->SPVM[5][2][7]=1.e00;
gas->SPVM[1][3][7]=1580.e00;
gas->SPVM[2][3][7]=20000.e00; //estimate
gas->SPVM[3][3][7]=2500.e00; //estimate
gas->SPVM[5][3][7]=1.e00;
gas->ISPVM[1][1][7]=2;
gas->ISPVM[2][1][7]=3;
gas->ISPVM[1][2][7]=2;
gas->ISPVM[2][2][7]=3;
gas->ISPVM[1][3][7]=2;
gas->ISPVM[2][3][7]=3;
//Species 8 is argon
gas->SP[1][8]=4.17e-10;
gas->SP[2][8]=273.15;
gas->SP[3][8]=0.81 ;
gas->SP[4][8]=1.0;
gas->SP[5][8]=6.63e-26;
gas->SP[6][8]=0.e00;
gas->ISPR[1][8]=0;
gas->ISPV[8]=0;
//
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->ISPRC[i][j]=0;
}
}
//gas->ISPRC=0; //data is zero unless explicitly set
//
gas->ISPRC[4][4]=3; //O+O+M -> O2+M recombined species code for an O+O recombination
gas->ISPRK[4][4]=1;
gas->SPRC[1][4][4][1]=0.26e00;
gas->SPRC[2][4][4][1]=-1.3e00;
gas->SPRC[1][4][4][2]=0.29e00;
gas->SPRC[2][4][4][2]=-1.3e00;
gas->SPRC[1][4][4][3]=0.04e00;
gas->SPRC[2][4][4][3]=-1.5e00;
gas->SPRC[1][4][4][4]=0.1e00;
gas->SPRC[2][4][4][4]=-1.4e00;
gas->SPRC[1][4][4][5]=0.1e00;
gas->SPRC[2][4][4][5]=-1.4e00;
gas->SPRC[1][4][4][6]=0.1e00;
gas->SPRC[2][4][4][6]=-1.4e00;
gas->SPRC[1][4][4][7]=0.07e00;
gas->SPRC[2][4][4][7]=-1.5e00;
gas->SPRC[1][4][4][8]=0.07e00;
gas->SPRC[2][4][4][8]=-1.5e00;
gas->SPRT[1][4][4]=1000.e00;
gas->SPRT[2][4][4]=3000.e00;
//
gas->ISPRC[2][2]=1; //H+H+M -> H2+M
gas->ISPRK[2][2]=1;
gas->SPRC[1][2][2][1]=0.07e00;
gas->SPRC[2][2][2][1]=-2.e00;
gas->SPRC[1][2][2][2]=0.11e00;
gas->SPRC[2][2][2][2]=-2.2e00;
gas->SPRC[1][2][2][3]=0.052e00;
gas->SPRC[2][2][2][3]=-2.5e00;
gas->SPRC[1][2][2][4]=0.052e00;
gas->SPRC[2][2][2][4]=-2.5e00;
gas->SPRC[1][2][2][5]=0.052e00;
gas->SPRC[2][2][2][5]=-2.5e00;
gas->SPRC[1][2][2][6]=0.052e00;
gas->SPRC[2][2][2][6]=-2.5e00;
gas->SPRC[1][2][2][7]=0.052e00;
gas->SPRC[2][2][2][7]=-2.5e00;
gas->SPRC[1][2][2][8]=0.04e00;
gas->SPRC[2][2][2][8]=-2.5e00; //exponent for third-body species 8 (the line above sets the corresponding coefficient)
gas->SPRT[1][2][2]=1000.e00;
gas->SPRT[2][2][2]=3000.e00;
//
gas->ISPRC[2][4]=5; //H+O+M -> OH+M
gas->ISPRK[2][4]=1;
gas->SPRC[1][2][4][1]=0.15e00;
gas->SPRC[2][2][4][1]=-2.e00;
gas->SPRC[1][2][4][2]=0.04e00;
gas->SPRC[2][2][4][2]=-1.3e00;
gas->SPRC[1][2][4][3]=0.04e00;
gas->SPRC[2][2][4][3]=-1.3e00;
gas->SPRC[1][2][4][4]=0.04e00;
gas->SPRC[2][2][4][4]=-1.3e00;
gas->SPRC[1][2][4][5]=0.04e00;
gas->SPRC[2][2][4][5]=-1.3e00;
gas->SPRC[1][2][4][6]=0.21e00;
gas->SPRC[2][2][4][6]=-2.1e00;
gas->SPRC[1][2][4][7]=0.18e00;
gas->SPRC[2][2][4][7]=-2.3e00;
gas->SPRC[1][2][4][8]=0.16e00;
gas->SPRC[2][2][4][8]=-2.3e00;
gas->SPRT[1][2][4]=1000.e00;
gas->SPRT[2][2][4]=3000.e00;
//
gas->ISPRC[2][5]=6; //H+OH+M -> H2O+M
gas->ISPRK[2][5]=1;
gas->SPRC[1][2][5][1]=0.1e00;
gas->SPRC[2][2][5][1]=-2.0e00;
gas->SPRC[1][2][5][2]=0.1e00;
gas->SPRC[2][2][5][2]=-2.0e00;
gas->SPRC[1][2][5][3]=0.0025e00;
gas->SPRC[2][2][5][3]=-2.2e00;
gas->SPRC[1][2][5][4]=0.0025e00;
gas->SPRC[2][2][5][4]=-2.2e00;
gas->SPRC[1][2][5][5]=0.0025e00;
gas->SPRC[2][2][5][5]=-2.2e00;
gas->SPRC[1][2][5][6]=0.0015e00;
gas->SPRC[2][2][5][6]=-2.2e00;
gas->SPRC[1][2][5][7]=0.0027e00;
gas->SPRC[2][2][5][7]=-2.e00;
gas->SPRC[1][2][5][8]=0.0025e00;
gas->SPRC[2][2][5][8]=-2.e00;
gas->SPRT[1][2][5]=1000.e00;
gas->SPRT[2][2][5]=3000.e00;
//
gas->ISPRC[2][3]=7; //H+O2+M -> HO2+M
gas->ISPRK[2][3]=1;
gas->SPRC[1][2][3][1]=0.0001e00;
gas->SPRC[2][2][3][1]=-1.7e00;
gas->SPRC[1][2][3][2]=0.0001e00;
gas->SPRC[2][2][3][2]=-1.7e00;
gas->SPRC[1][2][3][3]=0.00003e00;
gas->SPRC[2][2][3][3]=-1.5e00;
gas->SPRC[1][2][3][4]=0.00003e00;
gas->SPRC[2][2][3][4]=-1.7e00;
gas->SPRC[1][2][3][5]=0.00003e00;
gas->SPRC[2][2][3][5]=-1.7e00;
gas->SPRC[1][2][3][6]=0.00003e00;
gas->SPRC[2][2][3][6]=-1.7e00;
gas->SPRC[1][2][3][7]=0.000012e00;
gas->SPRC[2][2][3][7]=-1.7e00;
gas->SPRC[1][2][3][8]=0.00002e00;
gas->SPRC[2][2][3][8]=-1.7e00;
gas->SPRT[1][2][3]=1000.e00;
gas->SPRT[2][2][3]=3000.e00;
//
//set the exchange reaction data
// memset(gas->SPEX,0,sizeof(****gas->SPEX));//gas->SPEX=0.e00; //all activation energies and heats of reaction are zero unless set otherwise
for(int i=0;i<7;i++){
for(int j=0;j<gas->MMEX+1;j++){
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<gas->MSP+1;l++)
gas->SPEX[i][j][k][l]=0.e00;
}
}
}
//gas->ISPEX=0; // ISPEX is also zero unless set otherwise
for(int i=0;i<gas->MMEX+1;i++){
for(int j=0;j<8;j++){
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<gas->MSP+1;l++)
                gas->ISPEX[i][j][k][l]=0;
}
}
}
//gas->NSPEX=0;
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->NSPEX[i][j]=0;
}
}
//set the number of exchange reactions for each species pair
gas->NSPEX[1][3]=1;
gas->NSPEX[2][7]=3;
gas->NSPEX[2][3]=1;
gas->NSPEX[4][5]=1;
gas->NSPEX[1][4]=1;
gas->NSPEX[2][5]=1;
gas->NSPEX[1][5]=1;
gas->NSPEX[2][6]=1;
gas->NSPEX[4][6]=2;
gas->NSPEX[5][5]=2;
gas->NSPEX[4][7]=1;
gas->NSPEX[3][5]=1;
//set the information on the chain reactions
//
//H2+O2 -> HO2+H
gas->ISPEX[1][1][1][3]=1;
gas->ISPEX[1][2][1][3]=3;
gas->ISPEX[1][3][1][3]=7;
gas->ISPEX[1][4][1][3]=2;
gas->ISPEX[1][5][1][3]=1;
gas->ISPEX[1][6][1][3]=1;
gas->SPEX[6][1][1][3]=0.e00;
gas->NEX[1][1][3]=1;
//
//HO2+H -> H2+O2
gas->ISPEX[1][1][2][7]=7;
gas->ISPEX[1][2][2][7]=2;
gas->ISPEX[1][3][2][7]=1;
gas->ISPEX[1][4][2][7]=3;
gas->ISPEX[1][5][2][7]=1;
gas->ISPEX[1][6][2][7]=1;
gas->ISPEX[1][7][2][7]=1;
//HO2 is H-O-O, so not all vibrational modes contribute to this reaction, but the numbers here are guesses
gas->SPEX[1][1][2][7]=20.e00;
gas->SPEX[2][1][2][7]=0.4e00;
gas->SPEX[4][1][2][7]=2000.e00;
gas->SPEX[5][1][2][7]=3000.e00;
gas->SPEX[6][1][2][7]=0.e00;
gas->NEX[1][2][7]=2;
//
//O2+H -> OH+O
gas->ISPEX[1][1][2][3]=3;
gas->ISPEX[1][2][2][3]=2;
gas->ISPEX[1][3][2][3]=5;
gas->ISPEX[1][4][2][3]=4;
gas->ISPEX[1][5][2][3]=1;
gas->ISPEX[1][6][2][3]=1;
gas->SPEX[6][1][2][3]=0.e00;
gas->NEX[1][2][3]=3;
//
//OH+O -> O2+H
gas->ISPEX[1][1][4][5]=5;
gas->ISPEX[1][2][4][5]=4;
gas->ISPEX[1][3][4][5]=3;
gas->ISPEX[1][4][4][5]=2;
gas->ISPEX[1][5][4][5]=1;
gas->ISPEX[1][6][4][5]=1;
gas->ISPEX[1][7][4][5]=1;
gas->SPEX[1][1][4][5]=0.65e00;
gas->SPEX[2][1][4][5]=-0.26;
gas->SPEX[4][1][4][5]=2000.e00;
gas->SPEX[5][1][4][5]=3000.e00;
gas->SPEX[6][1][4][5]=0.e00;
gas->NEX[1][4][5]=4;
//
//H2+O -> OH+H
gas->ISPEX[1][1][1][4]=1;
gas->ISPEX[1][2][1][4]=4;
gas->ISPEX[1][3][1][4]=5;
gas->ISPEX[1][4][1][4]=2;
gas->ISPEX[1][5][1][4]=1;
gas->ISPEX[1][6][1][4]=1;
gas->SPEX[6][1][1][4]=0.e00;
gas->NEX[1][1][4]=5;
//
//OH+H -> H2+O
gas->ISPEX[1][1][2][5]=5;
gas->ISPEX[1][2][2][5]=2;
gas->ISPEX[1][3][2][5]=1;
gas->ISPEX[1][4][2][5]=4;
gas->ISPEX[1][5][2][5]=1;
gas->ISPEX[1][6][2][5]=1;
gas->ISPEX[1][7][2][5]=1;
gas->SPEX[1][1][2][5]=0.5e00;
gas->SPEX[2][1][2][5]=-0.2e00;
gas->SPEX[4][1][2][5]=2000.e00;
gas->SPEX[5][1][2][5]=3000.e00;
gas->SPEX[6][1][2][5]=0.e00;
gas->NEX[1][2][5]=6;
//
//H2O+H -> OH+H2
gas->ISPEX[1][1][2][6]=6;
gas->ISPEX[1][2][2][6]=2;
gas->ISPEX[1][3][2][6]=5;
gas->ISPEX[1][4][2][6]=1;
gas->ISPEX[1][5][2][6]=1;
gas->ISPEX[1][6][2][6]=1;
gas->SPEX[6][1][2][6]=2.0e-19;
gas->NEX[1][2][6]=7;
//OH+H2 -> H2O+H
gas->ISPEX[1][1][1][5]=5;
gas->ISPEX[1][2][1][5]=1;
gas->ISPEX[1][3][1][5]=6;
gas->ISPEX[1][4][1][5]=2;
gas->ISPEX[1][5][1][5]=1;
gas->ISPEX[1][6][1][5]=1;
gas->ISPEX[1][7][1][5]=1;
gas->SPEX[1][1][1][5]=0.5;
gas->SPEX[2][1][1][5]=-0.2;
gas->SPEX[4][1][1][5]=2000.e00;
gas->SPEX[5][1][1][5]=3000.e00;
gas->SPEX[6][1][1][5]=0.e00;
gas->NEX[1][1][5]=8;
//
//H2O+O -> OH+OH
gas->ISPEX[1][1][4][6]=6;
gas->ISPEX[1][2][4][6]=4;
gas->ISPEX[1][3][4][6]=5;
gas->ISPEX[1][4][4][6]=5;
gas->ISPEX[1][5][4][6]=1;
gas->ISPEX[1][6][4][6]=1;
gas->SPEX[6][1][4][6]=0.e00;
gas->NEX[1][4][6]=9;
//
//OH+OH -> H2O+O
gas->ISPEX[1][1][5][5]=5;
gas->ISPEX[1][2][5][5]=5;
gas->ISPEX[1][3][5][5]=6;
gas->ISPEX[1][4][5][5]=4;
gas->ISPEX[1][5][5][5]=1;
gas->ISPEX[1][6][5][5]=1;
gas->ISPEX[1][7][5][5]=1;
gas->SPEX[1][1][5][5]=0.35;
gas->SPEX[2][1][5][5]=-0.2 ;
gas->SPEX[4][1][5][5]=2000.e00;
gas->SPEX[5][1][5][5]=3000.e00;
gas->SPEX[6][1][5][5]=0.e00;
gas->NEX[1][5][5]=10;
//
//OH+OH -> HO2+H
//
gas->ISPEX[2][1][5][5]=5;
gas->ISPEX[2][2][5][5]=5;
gas->ISPEX[2][3][5][5]=7;
gas->ISPEX[2][4][5][5]=2;
gas->ISPEX[2][5][5][5]=1;
gas->ISPEX[2][6][5][5]=1;
gas->SPEX[6][2][5][5]=0.e00;
gas->NEX[2][5][5]=11;
//
//HO2+H -> OH+OH
gas->ISPEX[2][1][2][7]=7;
gas->ISPEX[2][2][2][7]=2;
gas->ISPEX[2][3][2][7]=5;
gas->ISPEX[2][4][2][7]=5;
gas->ISPEX[2][5][2][7]=1;
gas->ISPEX[2][6][2][7]=1;
gas->ISPEX[2][7][2][7]=1;
gas->SPEX[1][2][2][7]=120.e00;
gas->SPEX[2][2][2][7]=-0.05e00;
gas->SPEX[4][2][2][7]=2000.e00;
gas->SPEX[5][2][2][7]=3000.e00;
gas->SPEX[6][2][2][7]=0.e00;
gas->NEX[2][2][7]=12;
//
//H2O+O -> HO2+H
//
gas->ISPEX[2][1][4][6]=6;
gas->ISPEX[2][2][4][6]=4;
gas->ISPEX[2][3][4][6]=7;
gas->ISPEX[2][4][4][6]=2;
gas->ISPEX[2][5][4][6]=1;
gas->ISPEX[2][6][4][6]=1;
gas->SPEX[6][2][4][6]=0.e00;
gas->NEX[2][4][6]=13;
//
//HO2+H -> H2O+O
//
gas->ISPEX[3][1][2][7]=7;
gas->ISPEX[3][2][2][7]=2;
gas->ISPEX[3][3][2][7]=6;
gas->ISPEX[3][4][2][7]=4;
gas->ISPEX[3][5][2][7]=1;
gas->ISPEX[3][6][2][7]=1;
gas->ISPEX[3][7][2][7]=1;
gas->SPEX[1][3][2][7]=40.e00;
gas->SPEX[2][3][2][7]=-1.e00;
gas->SPEX[4][3][2][7]=2000.e00;
gas->SPEX[5][3][2][7]=3000.e00;
gas->SPEX[6][3][2][7]=0.e00;
gas->NEX[3][2][7]=14;
//
//OH+O2 -> HO2+O
//
gas->ISPEX[1][1][3][5]=5;
gas->ISPEX[1][2][3][5]=3;
gas->ISPEX[1][3][3][5]=7;
gas->ISPEX[1][4][3][5]=4;
gas->ISPEX[1][5][3][5]=1;
gas->ISPEX[1][6][3][5]=1;
gas->SPEX[6][1][3][5]=0.e00;
gas->NEX[1][3][5]=15;
//
//HO2+O -> OH+O2
//
gas->ISPEX[1][1][4][7]=7;
gas->ISPEX[1][2][4][7]=4;
gas->ISPEX[1][3][4][7]=5;
gas->ISPEX[1][4][4][7]=3;
gas->ISPEX[1][5][4][7]=1;
gas->ISPEX[1][6][4][7]=1;
gas->ISPEX[1][7][4][7]=1;
gas->SPEX[1][1][4][7]=100.e00;
gas->SPEX[2][1][4][7]=0.15e00;
gas->SPEX[4][1][4][7]=2000.e00;
gas->SPEX[5][1][4][7]=3000.e00;
gas->SPEX[6][1][4][7]=0.e00;
gas->NEX[1][4][7]=16;
//
DERIVED_GAS_DATA();
//
cout<<"OXYGEN_HYDROGEN data done"<<endl;
return;
}
//***************************************************************************
//*************************END OF GAS DATABASE*******************************
//***************************************************************************
//
void DERIVED_GAS_DATA()
{
//
//GAS gas;
//CALC calc;
int I,II,J,JJ,K,L,M,MM,N,JMAX,MOLSP,MOLOF,NSTEP,IMAX;
double A,B,BB,C,X,T,CUR,EAD,TVD,ZVT,ERD,PETD,DETD,PINT,ETD,SUMD,VAL;
double **BFRAC,**TOT;
double ****VRRD;
double *****VRREX;
//
//VRRD(1,L,M,K) dissociation rate coefficient to species L,M for vibrational level K at 5,000 K
//VRRD(2,L,M,K) similar for 15,000 K
//VRREX(1,J,L,M,K) Jth exchange rate coefficient to species L,M for vibrational level K at 1,000 K
//VRREX(2,J,L,M,K) similar for 3,000 K
//BFRAC(2,J) Boltzmann fraction
//JMAX imax-1
//T temperature
//CUR sum of level resolved rates
//
VRRD = new double ***[3];
for (int i = 0; i < 3; ++i)
{
VRRD[i] = new double **[gas->MSP+1];
for (int j = 0; j < gas->MSP+1; ++j)
{
VRRD[i][j] = new double *[gas->MSP+1];
for(int k=0; k<gas->MSP+1; ++k)
VRRD[i][j][k]=new double [gas->MVIBL+1];
}
}
BFRAC = new double*[gas->MVIBL+1];
for(int i =0; i< (gas->MVIBL+1); ++i)
BFRAC[i] = new double[3];
VRREX = new double ****[3];
for (int i = 0; i < 3; ++i)
{
VRREX[i] = new double ***[gas->MMEX+1];
for (int j = 0; j < gas->MMEX+1; ++j)
{
VRREX[i][j] = new double **[gas->MSP+1];
for(int k=0; k<gas->MSP+1; ++k)
{
VRREX[i][j][k]=new double *[gas->MSP+1];
for(int l=0; l<gas->MSP+1; ++l)
VRREX[i][j][k][l]= new double[gas->MVIBL+1];
}
}
}
TOT = new double*[gas->MVIBL+1];
for(int i =0; i< (gas->MVIBL+1); ++i)
TOT[i] = new double[3];
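//Note (added): VRRD, VRREX, BFRAC and TOT are ordinary host-side work arrays allocated with new,
//unlike the managed (hipMallocManaged) arrays used elsewhere; they are only needed while this
//set-up routine builds the SPRP and SPREX tables and are never touched by device code.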
// ALLOCATE (VRRD(2,MSP,MSP,0:MVIBL),BFRAC(0:MVIBL,2),VRREX(2,MMEX,MSP,MSP,0:MVIBL),TOT(0:MVIBL,2),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT ALLOCATE VIB. RES. DISS. RATES',ERROR
// END IF
//
cout<<"Setting derived gas data"<<endl;
//copy the L,M data that has been specified for L < M so that it applies also for M>L
for(L=1;L<=gas->MSP;L++){
for(M=1;M<=gas->MSP;M++){
if(L > M){
gas->NSPEX[L][M]=gas->NSPEX[M][L];
gas->ISPRC[L][M]=gas->ISPRC[M][L];
gas->ISPRK[L][M]=gas->ISPRK[M][L];
for(K=1;K<=gas->MSP;K++){
gas->SPRT[1][L][M]=gas->SPRT[1][M][L];
gas->SPRT[2][L][M]=gas->SPRT[2][M][L];
gas->SPRC[1][L][M][K]=gas->SPRC[1][M][L][K];
gas->SPRC[2][L][M][K]=gas->SPRC[2][M][L][K];
}
for(K=1;K<=gas->MMEX;K++){
gas->NEX[K][L][M]=gas->NEX[K][M][L];
for(J=1;J<=6;J++){
gas->SPEX[J][K][L][M]=gas->SPEX[J][K][M][L];
}
for(J=1;J<=7;J++){
gas->ISPEX[K][J][L][M]=gas->ISPEX[K][J][M][L];
}
}
}
}
}
//
if(gas->MMVM > 0){
//set the characteristic dissociation temperatures
for(L=1;L<=gas->MSP;L++){
if(gas->ISPV[L] > 0){
for(K=1;K<=gas->ISPV[L];K++)
{
I=gas->ISPVM[1][K][L];
J=gas->ISPVM[2][K][L];
gas->SPVM[4][K][L]=(gas->SP[6][I]+gas->SP[6][J]-gas->SP[6][L])/BOLTZ;
//WRITE (9,*) 'Char. Diss temp of species',L,' is',SPVM(4,K,L)
file_9<<"Char. Diss temp of species "<<L<<" is "<<gas->SPVM[4][K][L]<<endl;
}
}
}
}
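//Added worked example: with the REAL_OXYGEN data, O2 dissociating into two O atoms gives
//SPVM(4,1,1)=(SP(6,2)+SP(6,2)-SP(6,1))/BOLTZ=(2*4.099e-19-0)/1.380658e-23, i.e. about 5.94e4 K,
//the characteristic dissociation temperature of O2.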
//
if(gas->MMEX > 0){
//set the heats of reaction of the exchange and chain reactions
for(L=1;L<=gas->MSP;L++){
for(M=1;M<=gas->MSP;M++){
for(J=1;J<=gas->MMEX;J++){
if((gas->ISPEX[J][3][L][M]> 0) && (gas->ISPEX[J][4][L][M]>0) && (gas->ISPEX[J][1][L][M]>0) && (gas->ISPEX[J][2][L][M]>0)){
gas->SPEX[3][J][L][M]=gas->SP[6][gas->ISPEX[J][1][L][M]]+gas->SP[6][gas->ISPEX[J][2][L][M]]-gas->SP[6][gas->ISPEX[J][3][L][M]]-gas->SP[6][gas->ISPEX[J][4][L][M]];
// WRITE (9,*) 'Reaction',NEX(J,L,M),' heat of reaction',SPEX(3,J,L,M)
file_9<<"Reaction "<<gas->NEX[J][L][M]<<" heat of reaction"<<gas->SPEX[3][J][L][M]<<endl;
}
}
}
}
}
//
if(gas->MELE > 1){
//set the electronic cross-section ratios to a mean electronic relaxation collision number
//(equipartition is not achieved unless there is a single number)
for(L=1;L<=gas->MSP;L++){
A=0.e00;
for(K=1;K<=gas->NELL[L];K++){
A=A+gas->QELC[3][K][L];
}
gas->QELC[3][1][L]=A/double(gas->NELL[L]);
}
}
//
//set the cumulative distributions of the post-recombination vibrational distributions for establishment of detailed balance
for(L=1;L<=gas->MSP;L++){
for(M=1;M<=gas->MSP;M++){
if(gas->ISPRC[L][M] > 0){
N=gas->ISPRC[L][M]; //recombined species
K=gas->ISPRK[L][M]; //relevant vibrational mode
//WRITE (9,*) 'SPECIES',L,M,' RECOMBINE TO',N
file_9<<"SPECIES "<<L<<" "<<M<<" RECOMBINE TO"<<N<<endl;
JMAX=gas->SPVM[4][K][N]/gas->SPVM[1][K][N];
if(JMAX > gas->MVIBL){
cout<<" The variable MVIBL="<<gas->MVIBL<<" in the gas database must be increased to"<<JMAX<<endl;
cout<<"Enter 0 ENTER to stop";
cin>> A;
return ;
}
A=2.5e00-gas->SP[3][N];
for(I=1;I<=2;I++){
if(I == 1) T=gas->SPRT[1][L][M];
if(I == 2) T=gas->SPRT[2][L][M];
//WRITE (9,*) 'TEMPERATURE',T
file_9<<"TEMPERATURE "<<T<<endl;
CUR=0.e00;
for(J=0;J<=JMAX;J++){
X=double(JMAX+1-J)*gas->SPVM[1][K][N]/T;
CQAX(A,X,B);
VRRD[I][L][M][J]=B*exp(-double(J)*gas->SPVM[1][K][N]/T);
CUR=CUR+VRRD[I][L][M][J];
}
B=0.e00;
for(J=0;J<=JMAX;J++){
B=B+VRRD[I][L][M][J]/CUR;
gas->SPRP[I][L][M][J]=B;
//WRITE (9,*) 'CDF level dissoc',J,SPRP(I,L,M,J)
file_9<< "CDF level dissoc "<<J<<" "<<gas->SPRP[I][L][M][J];
}
}
}
}
}
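//The SPRP values written above therefore form, for each of the two temperatures and each species
//pair, a cumulative distribution over the vibrational levels of the recombined molecule (the
//final entry reaches 1); it is presumably sampled later to choose the post-recombination
//vibrational level consistently with detailed balance, as the heading comment states.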
//
//READ (*,*) //optionally pause program to check cumulative distributions for exchange and chain reactions
//
//set the cumulative distributions of the post-reverse vibrational distributions for establishment of detailed balance
for(L=1;L<=gas->MSP;L++){
for(M=1;M<=gas->MSP;M++){
if(gas->NSPEX[L][M] > 0){
for(K=1;K<=gas->NSPEX[L][M];K++){
if(gas->SPEX[3][K][L][M] > 0.e00){ //exothermic (reverse) exchange reaction
//L,M are the species in the reverse reaction, E_a of forward reaction is SPEX(3,K,L,M)
//WRITE (9,*) 'SPECIES',L,M,' REVERSE REACTION'
file_9<<"SPECIES "<<L<<" "<<M<<" REVERSE REACTION"<<endl;
MOLSP=gas->ISPEX[K][3][L][M]; //molecule that splits in the forward reaction
MOLOF=gas->ISPEX[K][4][L][M];
JMAX=(gas->SPEX[3][K][L][M]+gas->SPEX[6][K][MOLSP][MOLOF])/(BOLTZ*gas->SPVM[1][gas->ISPEX[K][5][L][M]][MOLSP])+15; //should always be less than the JMAX set by dissociation reactions
for(I=1;I<=2;I++){
if(I == 1) T=gas->SPEX[4][K][L][M];
if(I == 2) T=gas->SPEX[5][K][L][M];
for(J=0;J<=JMAX;J++){
EAD=(gas->SPEX[3][K][L][M]+gas->SPEX[6][K][MOLSP][MOLOF])/(BOLTZ*T);
TVD=gas->SPVM[1][gas->ISPEX[K][5][L][M]][MOLSP]/T;
ZVT=1.e00/(1.e00-exp(-TVD));
C=ZVT/(tgamma(2.5e00-gas->SP[3][MOLSP])*exp(-EAD)); //coefficient of integral
ERD=EAD-double(J)*TVD;
if(ERD < 0.e00) ERD=0.e00;
PETD=ERD;
DETD=0.01e00;
PINT=0.e00; //progressive value of integral
NSTEP=0;
A=1.e00;
while(A > 1.e-10){
NSTEP=NSTEP+1;
ETD=PETD+0.5e00*DETD;
SUMD=0.e00; //normalizing sum in the denominator
IMAX=ETD/TVD+J;
for(II=0;II<=IMAX;II++){
SUMD=SUMD+pow((1.e00-double(II)*TVD/(ETD+double(J)*TVD)),(1.5e00-gas->SP[3][MOLSP]));
}
VAL=(pow((ETD*(1.e00-EAD/(ETD+double(J)*TVD))),(1.5e00-gas->SP[3][MOLSP]))/SUMD)*exp(-ETD);
PINT=PINT+VAL*DETD;
A=VAL/PINT;
PETD=ETD+0.5e00*DETD;
}
VRREX[I][K][L][M][J]=C*PINT;
// WRITE (*,*) 'Level ratio exch',I,J,VRREX(I,K,L,M,J)
}
}
//
//memset(TOT,0.e00,sizeof(**TOT));//TOT=0.e00;
for(int i=0;i<gas->MVIBL+1;i++){
for(int j=0;j<gas->MVIBL+1;j++){
TOT[i][j]=0;
}
}
for(I=1;I<=2;I++){
if(I == 1) T=gas->SPEX[4][K][L][M];
if(I == 2) T=gas->SPEX[5][K][L][M];
for(J=0;J<=JMAX;J++){
TVD=gas->SPVM[1][gas->ISPEX[K][5][L][M]][MOLSP]/T;
ZVT=1.e00/(1.e00-exp(-TVD));
BFRAC[J][I]=exp(-J*gas->SPVM[1][gas->ISPEX[K][5][L][M]][MOLSP]/T)/ZVT; //Boltzmann fraction
VRREX[I][K][L][M][J]=VRREX[I][K][L][M][J]*BFRAC[J][I];
// WRITE (*,*) 'Contribution',I,J,VRREX(I,K,L,M,J)
for(MM=0;MM<=J;MM++)
TOT[J][I]=TOT[J][I]+VRREX[I][K][L][M][MM];
}
}
//
for(I=1;I<=2;I++){
for(J=0;J<=JMAX;J++){
gas->SPREX[I][K][L][M][J]=TOT[J][I];
if(J == JMAX) gas->SPREX[I][K][L][M][J]=1.e00;
//WRITE (9,*) 'Cumulative',I,J,SPREX(I,K,L,M,J)
file_9<<"Cumulative "<<I<<" "<<J<<" "<<gas->SPREX[I][K][L][M][J];
}
}
}
}
gas->NSLEV=0;
//memset(gas->SLER,0.e00,sizeof(*gas->SLER));//gas->SLER=0.e00;
for(int i=0;i<gas->MSP+1;i++)
gas->SLER[i]=0.e00;
}
}
}
//
//READ (*,*) //optionally pause program to check cumulative distributions for exchange and chain reactions
return;
}
void READ_DATA()
{
//CALC calc;
//MOLECS molecs;
//GAS gas;
//OUTPUT output;
//GEOM_1D geom;
fstream file_3;
fstream file_4;
int NVERD,MVERD,N,K;
if(calc->ICLASS==0)
{
cout<<"Reading the data file DS0D.DAT"<<endl;
file_4.open("DS0D.DAT", ios::in);
file_3.open("DS0D.TXT", ios::out);
file_3<<"Data summary for program DSMC"<<endl;
// OPEN (4,FILE='DS0D.DAT')
// OPEN (3,FILE='DS0D.TXT')
// WRITE (3,*) 'Data summary for program DSMC'
}
if(calc->ICLASS==1)
{
cout<<"Reading the data file DS1D.DAT"<<endl;
file_4.open("DS1D.DAT", ios::in);
file_3.open("DS1D.TXT", ios::out );
file_3<<"Data summary for program DSMC"<<endl;
// OPEN (4,FILE='DS1D.DAT')
// OPEN (3,FILE='DS1D.TXT')
// WRITE (3,*) 'Data summary for program DSMC'
}
//the following items are common to all classes of flow
file_4>>NVERD;
file_3<<"The n in version number n.m is "<<NVERD<<endl;
file_4>>MVERD;
file_3<<"The m in version number n.m is "<<MVERD<<endl;
file_4>>calc->IMEG;
file_3<<"The approximate number of megabytes for the calculation is "<<calc->IMEG<<endl;
file_4>>gas->IGAS;
file_3<<gas->IGAS<<endl;//gas->IGAS=1;
// READ (4,*) NVERD
// WRITE (3,*) 'The n in version number n.m is',NVERD
// READ (4,*) MVERD
// WRITE (3,*) 'The m in version number n.m is',MVERD
// READ (4,*) IMEG //calc->IMEG
// WRITE (3,*) 'The approximate number of megabytes for the calculation is',IMEG //calc->IMEG
// READ (4,*) IGAS //gas->IGAS
// WRITE (3,*) IGAS //gas->IGAS
if(gas->IGAS==1)
{
file_3<<" Hard sphere gas "<<endl;
// WRITE (3,*) 'Hard sphere gas'
HARD_SPHERE();
}
if(gas->IGAS==2)
{
file_3<<"Argon "<<endl;
// WRITE (3,*) 'Argon'
ARGON();
}
if(gas->IGAS==3)
{
file_3<<"Ideal nitrogen"<<endl;
// WRITE (3,*) 'Ideal nitrogen'
IDEAL_NITROGEN();
}
if(gas->IGAS==4)
{
file_3<<"Real oxygen "<<endl;
// WRITE (3,*) 'Real oxygen'
REAL_OXYGEN();
}
if(gas->IGAS==5)
{
file_3<<"Ideal air "<<endl;
// WRITE (3,*) 'Ideal air'
IDEAL_AIR();
}
if(gas->IGAS==6)
{
file_3<<"Real air @ 7.5 km/s "<<endl;
// WRITE (3,*) 'Real air @ 7.5 km/s'
REAL_AIR();
}
if(gas->IGAS==7)
{
file_3<<"Helium-argon-xenon mixture "<<endl;
// WRITE (3,*) 'Helium-argon-xenon mixture'
HELIUM_ARGON_XENON();
}
if(gas->IGAS==8)
{
file_3<<"Oxygen-hydrogen "<<endl;
// WRITE (3,*) 'Oxygen-hydrogen'
OXYGEN_HYDROGEN();
}
file_3<<"The gas properties are:- "<<endl;
file_4>>gas->FND[1];
file_3<<"The stream number density is "<<gas->FND[1]<<endl;
file_4>>gas->FTMP[1];
file_3<<"The stream temperature is "<<gas->FTMP[1]<<endl;
// WRITE (3,*) 'The gas properties are:-'
// READ (4,*) FND(1) //gas->FND[1]
// WRITE (3,*) ' The stream number density is',FND(1) ////gas->FND[1]
// READ (4,*) FTMP(1) //gas->FTMP[1]
// WRITE (3,*) ' The stream temperature is',FTMP(1) //gas->FTMP[1]
if(gas->MMVM>0)
{
file_4>>gas->FVTMP[1];
file_3<<"The stream vibrational and electronic temperature is "<<gas->FVTMP[1]<<endl;
// READ (4,*) FVTMP(1) //gas->FVTMP;
// WRITE (3,*) ' The stream vibrational and electronic temperature is',FVTMP(1) //gas->FVTMP[1]
}
if(calc->ICLASS==1)
{
file_4>>gas->VFX[1];
file_3<<"The stream velocity in the x direction is "<<gas->VFX[1]<<endl;
file_4>>gas->VFY[1];
file_3<<"The stream velocity in the y direction is "<<gas->VFY[1]<<endl;
// READ (4,*) VFX(1) //gas->VFX[1]
// WRITE (3,*) ' The stream velocity in the x direction is',VFX(1) //gas->VFX[1]
// READ (4,*) VFY(1) ////gas->VFY[1]
// WRITE (3,*) ' The stream velocity in the y direction is',VFY(1) ////gas->VFY[1]
}
if(gas->MSP>1)
{
for(N=1;N<=gas->MSP;N++)
{
file_4>>gas->FSP[N][1];
file_3<<" The fraction of species "<<N<<" is "<<gas->FSP[N][1]<<endl;
// READ (4,*) FSP(N,1) //gas->FSP[N][1]
// WRITE (3,*) ' The fraction of species',N,' is',FSP(N,1) //gas->FSP[N][1]
}
}
else
{
gas->FSP[1][1]=1.0; //simple gas
}
if(calc->ICLASS==0){
// !--a homogeneous gas case is calculated as a one-dimensional flow with a single sampling cell
// !--set the items that are required in the DS1D.DAT specification
geom->IFX=0;
geom->JFX=1;
geom->XB[1]=0.e00;
geom->XB[2]=0.0001e00*1.e25/gas->FND[1];
geom->ITYPE[1]=1;
geom->ITYPE[2]=1;
gas->VFX[1]=0.e00;
calc->IGS=1;
calc->ISECS=0;
calc->IREM=0;
calc->MOLSC=10000*calc->IMEG; //a single sampling cell
}
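//Added worked example: XB(2)=0.0001*1.e25/FND(1), so for a stream number density of 1.e20 /m^3
//the single homogeneous-gas cell extends from 0 to 1.e21/1.e20 = 10 m; the width scales
//inversely with density, keeping the column density FND(1)*XB(2)=1.e21 /m^2 fixed.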
if(calc->ICLASS==1)
{
file_4>>geom->IFX;
// READ (4,*) IFX //geom->IFX
if(geom->IFX==0)
file_3<<"Plane Flow"<<endl;
// WRITE (3,*) 'Plane flow'
if(geom->IFX==1)
file_3<<"Cylindrical flow"<<endl;
// WRITE (3,*) 'Cylindrical flow'
if(geom->IFX==2)
file_3<<"Spherical flow"<<endl;
// WRITE (3,*) 'Spherical flow'
geom->JFX=geom->IFX+1;
file_4>>geom->XB[1];
// READ (4,*) XB(1) //geom->XB[1]
file_3<<"The minimum x coordinate is "<<geom->XB[1]<<endl;
// WRITE (3,*) 'The minimum x coordinate is',XB(1) //geom->XB[1]
file_4>>geom->ITYPE[1];
// READ (4,*) ITYPE(1) //geom->ITYPE[1]
if(geom->ITYPE[1]==0)
file_3<<"The minimum x coordinate is a stream boundary"<<endl;
// WRITE (3,*) 'The minimum x coordinate is a stream boundary'
if(geom->ITYPE[1]==1)
file_3<<"The minimum x coordinate is a plane of symmetry"<<endl;
// WRITE (3,*) 'The minimum x coordinate is a plane of symmetry'
if(geom->ITYPE[1]==2)
file_3<<"The minimum x coordinate is a solid surface"<<endl;
// WRITE (3,*) 'The minimum x coordinate is a solid surface'
if(geom->ITYPE[1]==3)
file_3<<"The minimum x coordinate is a vacuum"<<endl;
// WRITE (3,*) 'The minimum x coordinate is a vacuum'
if(geom->ITYPE[1]==4)
file_3<<"The minimum x coordinate is an axis or center"<<endl;
// WRITE (3,*) 'The minimum x coordinate is an axis or center'
if(geom->ITYPE[1]==2)
{
file_3<<"The minimum x boundary is a surface with the following properties"<<endl;
file_4>>gas->TSURF[1];
file_3<<"The temperature of the surface is "<<gas->TSURF[1]<<endl;
file_4>>gas->FSPEC[1];
file_3<<"The fraction of specular reflection is "<<gas->FSPEC[1]<<endl;
file_4>>gas->VSURF[1];
file_3<<"The velocity in the y direction of this surface is "<<gas->VSURF[1];
// WRITE (3,*) 'The minimum x boundary is a surface with the following properties'
// READ (4,*) TSURF(1) //gas->TSURF[1]
// WRITE (3,*) ' The temperature of the surface is',TSURF(1) //gas->TSURF[1]
// READ (4,*) FSPEC(1) //gas->FSPEC[1]
// WRITE (3,*) ' The fraction of specular reflection is',FSPEC(1) //gas->FSPEC[1]
// READ (4,*) VSURF(1) //gas->VSURF[1]
// WRITE (3,*) ' The velocity in the y direction of this surface is',VSURF(1) //gas->VSURF[1]
}
file_4>>geom->XB[2];
file_3<<"The maximum x coordinate is "<<geom->XB[2]<<endl;
file_4>>geom->ITYPE[2];
// READ (4,*) XB(2) //geom->XB[2]
// WRITE (3,*) 'The maximum x coordinate is',XB(2)//geom->XB[2]
// READ (4,*) ITYPE(2)//geom->ITYPE[2]
if(geom->ITYPE[2]==0)
file_3<<"The mmaximum x coordinate is a stream boundary"<<endl;
// WRITE (3,*) 'The mmaximum x coordinate is a stream boundary'
if(geom->ITYPE[2]==1)
file_3<<"The maximum x coordinate is a plane of symmetry"<<endl;
// WRITE (3,*) 'The maximum x coordinate is a plane of symmetry'
if(geom->ITYPE[2]==2)
file_3<<"The maximum x coordinate is a solid surface"<<endl;
// WRITE (3,*) 'The maximum x coordinate is a solid surface'
if(geom->ITYPE[2]==3)
file_3<<"The maximum x coordinate is a vacuum"<<endl;
// WRITE (3,*) 'The maximum x coordinate is a vacuum'
calc->ICN=0;
if(geom->ITYPE[2]==4)
{
file_3<<"The maximum x coordinate is a stream boundary with a fixed number of simulated molecules"<<endl;
// WRITE (3,*) 'The maximum x coordinate is a stream boundary with a fixed number of simulated molecules'
if(gas->MSP==1)
calc->ICN=1;
}
if(geom->ITYPE[2]==2)
{
file_3<<"The maximum x boundary is a surface with the following properties"<<endl;
file_4>>gas->TSURF[1];
file_3<<"The temperature of the surface is "<<gas->TSURF[1]<<endl;
file_4>>gas->FSPEC[1];
file_3<<"The fraction of specular reflection is "<<gas->FSPEC[1]<<endl;
file_4>>gas->VSURF[1];
file_3<<"The velocity in the y direction of this surface is "<<gas->VSURF[1]<<endl;
// WRITE (3,*) 'The maximum x boundary is a surface with the following properties'
// READ (4,*) TSURF(1) //gas->TSURF[1]
// WRITE (3,*) ' The temperature of the surface is',TSURF(1) //gas->TSURF[1]
// READ (4,*) FSPEC(1) //gas->FSPEC[1]
// WRITE (3,*) ' The fraction of specular reflection is',FSPEC(1) //gas->FSPEC[1]
// READ (4,*) VSURF(1) //gas->VSURF[1]
// WRITE (3,*) ' The velocity in the y direction of this surface is',VSURF(1) //gas->VSURF[1]
}
if(geom->IFX>0)
{
file_4>>geom->IWF;
// READ (4,*) IWF //geom->IWF
if(geom->IWF==0)
file_3<<"There are no radial weighting factors"<<endl;
// WRITE (3,*) 'There are no radial weighting factors'
if(geom->IWF==1)
file_3<<"There are radial weighting factors"<<endl;
// WRITE (3,*) 'There are radial weighting factors'
if(geom->IWF==1)
{
file_4>>geom->WFM;
file_3<<"The maximum value of the weighting factor is "<<geom->WFM<<endl;
// READ (4,*) WFM //geom->WFM
// WRITE (3,*) 'The maximum value of the weighting factor is ',WFM //geom->WFM
geom->WFM=(geom->WFM-1)/geom->XB[2];
}
}
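//Added note (assumption about how WFM is used downstream): after this rescaling the stored WFM
//equals (WFM_max-1)/XB(2), which is consistent with a radial weighting factor of the form
//1+WFM*r that grows linearly from 1 on the axis to the user-specified maximum at r=XB(2).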
file_4>>calc->IGS;
// READ (4,*) IGS //calc->IGS
if(calc->IGS==0)
file_3<<"The flowfield is initially a vacuum "<<endl;
// WRITE (3,*) 'The flowfield is initially a vacuum'
if(calc->IGS==1)
file_3<<"The flowfield is initially the stream(s) or reference gas"<<endl;
// WRITE (3,*) 'The flowfield is initially the stream(s) or reference gas'
file_4>>calc->ISECS;
// READ (4,*) ISECS //calc->ISECS
if(calc->ISECS==0)
file_3<<"There is no secondary stream initially at x > 0"<<endl;
// WRITE (3,*) 'There is no secondary stream initially at x > 0'
if(calc->ISECS==1 && geom->IFX==0)
file_3<<"There is a secondary stream applied initially at x = 0 (XB(2) must be > 0)"<<endl;
// WRITE (3,*) 'There is a secondary stream applied initially at x = 0 (XB(2) must be > 0)'
if(calc->ISECS==1 && geom->IFX>0)
{
if(geom->IWF==1)
{
file_3<<"There cannot be a secondary stream when weighting factors are present"<<endl;
// WRITE (3,*) 'There cannot be a secondary stream when weighting factors are present'
return;//STOP//dout
}
file_3<<"There is a secondary stream"<<endl;
// WRITE (3,*) 'There is a secondary stream'
file_4>>geom->XS;
// READ (4,*) XS //geom->XS
file_3<<"The secondary stream boundary is at r= "<<geom->XS<<endl;
// WRITE (3,*) 'The secondary stream boundary is at r=',XS //geom->XS
}
if(calc->ISECS==1)
{
file_3<<"The secondary stream (at x>0 or X>XS) properties are:-"<<endl;
file_4>>gas->FND[2];
file_3<<"The stream number density is "<<gas->FND[2]<<endl;
file_4>>gas->FTMP[2];
file_3<<"The stream temperature is "<<gas->FTMP[2]<<endl;
// WRITE (3,*) 'The secondary stream (at x>0 or X>XS) properties are:-'
// READ (4,*) FND(2) //gas->FND
// WRITE (3,*) ' The stream number density is',FND(2) //gas->FND
// READ (4,*) FTMP(2) //gas->FTMP
// WRITE (3,*) ' The stream temperature is',FTMP(2) //gas->FTMP
if(gas->MMVM>0)
{
file_4>>gas->FVTMP[2];
file_3<<"The stream vibrational and electronic temperature is "<<gas->FVTMP[2]<<endl;
// READ (4,*) FVTMP(2) //gas->FVTMP[2]
// WRITE (3,*) ' The stream vibrational and electronic temperature is',FVTMP(2) //gas->FVTMP[2]
}
file_4>>gas->VFX[2];
file_3<<"The stream velocity in the x direction is "<<gas->VFX[2]<<endl;
file_4>>gas->VFY[2];
file_3<<"The stream velocity in the y direction is "<<gas->VFY[2]<<endl;
// READ (4,*) VFX(2) //gas->VFX
// WRITE (3,*) ' The stream velocity in the x direction is',VFX(2) //gas->VFX
// READ (4,*) VFY(2) //gas->VFY
// WRITE (3,*) ' The stream velocity in the y direction is',VFY(2) //gas->VFY
if(gas->MSP>1)
{
for(N=1;N<=gas->MSP;N++)
{
file_4>>gas->FSP[N][2];
file_3<<"The fraction of species "<<N<<" is "<<gas->FSP[N][2]<<endl;
// READ (4,*) FSP(N,2) //gas->FSP
// WRITE (3,*) ' The fraction of species',N,' is',FSP(N,2) //gas->FSP
}
}
else
{
gas->FSP[1][2]=1;
}
}
if(geom->IFX==0 && geom->ITYPE[1]==0)
{
file_4>>calc->IREM;
// READ (4,*) IREM //calc->IREM
if(calc->IREM==0)
{
file_3<<"There is no molecule removal"<<endl;
// WRITE (3,*) 'There is no molecule removal'
geom->XREM=geom->XB[1]-1.e00;
geom->FREM=0.e00;
}
else if(calc->IREM==1)
{
file_4>>geom->XREM;
file_3<<"There is full removal of the entering (at XB(1)) molecules between "<<geom->XREM<<" and "<<geom->XB[2]<<endl;
// READ (4,*) XREM //geom->XREM
// WRITE (3,*) ' There is full removal of the entering (at XB(1)) molecules between',XREM,' and',XB(2) //geom->XREM ,geom->XB[2]
geom->FREM=1.e00;
}
else if(calc->IREM==2)
{
file_3<<"Molecule removal is specified whenever the program is restarted"<<endl;
// WRITE (3,*) ' Molecule removal is specified whenever the program is restarted'
geom->XREM=geom->XB[1]-1.e00;
geom->FREM=0.e00;
}
else
{
geom->XREM=geom->XB[1]-1.e00;
geom->FREM=0.e00;
}
}
geom->IVB=0;
geom->VELOB=0.e00;
if(geom->ITYPE[2]==1)
{
file_4>>geom->IVB;
// READ (4,*) IVB
if(geom->IVB==0)
file_3<<"The outer boundary is stationary"<<endl;
// WRITE (3,*) ' The outer boundary is stationary'
if(geom->IVB==1)
{
file_3<<"The outer boundary moves with a constant speed"<<endl;
file_4>>geom->VELOB;
file_3<<" The speed of the outer boundary is "<<geom->VELOB<<endl;
// WRITE (3,*) ' The outer boundary moves with a constant speed'
// READ (4,*) VELOB //geom->VELOB
// WRITE (3,*) ' The speed of the outer boundary is',VELOB //geom->VELOB
}
}
file_4>>calc->MOLSC;
file_3<<"The desired number of molecules in a sampling cell is "<<calc->MOLSC<<endl;
// READ (4,*) MOLSC //calc->MOLSC
// WRITE (3,*) 'The desired number of molecules in a sampling cell is',MOLSC ////calc->MOLSC
}
//set the speed of the outer boundary
file_3.close();
file_4.close();
// CLOSE (3)
// CLOSE (4)
// set the stream at the maximum x boundary if there is no secondary stream
if(calc->ISECS==0 && geom->ITYPE[2]==0)
{
gas->FND[2]=gas->FND[1];
gas->FTMP[2]=gas->FTMP[1];
if(gas->MMVM>0)
gas->FVTMP[2]=gas->FVTMP[1];
gas->VFX[2]=gas->VFX[1];
if(gas->MSP>1)
{
for(N=1;N<=gas->MSP;N++)
{
gas->FSP[N][2]=gas->FSP[N][1];
}
}
else
gas->FSP[1][2]=1;
}
//dout
//1234 CONTINUE;
return;
}
void INITIALISE_SAMPLES()
{
//start a new sample for all classes of flow
//CALC calc;
//GEOM_1D geom;
//GAS gas;
//OUTPUT output;
//MOLECS molecs;
int N;
//
output->NSAMP=0.0;
output->TISAMP=calc->FTIME;
output->NMISAMP=molecs->NM;
//memset(output->COLLS,0.e00,sizeof(*output->COLLS));memset(output->WCOLLS,0.e00,sizeof(*output->WCOLLS));memset(output->CLSEP,0.e00,sizeof(*output->CLSEP));
for(int i=0;i<geom->NCELLS+1;i++)
output->COLLS[i]=0.e00;
for(int i=0;i<geom->NCELLS+1;i++)
output->WCOLLS[i]=0.e00;
for(int i=0;i<geom->NCELLS+1;i++)
output->CLSEP[i]=0.e00;
//output->COLLS=0.e00 ; output->WCOLLS=0.e00 ; output->CLSEP=0.e00;
//memset(calc->TCOL,0.0,sizeof(**calc->TCOL));//calc->TCOL=0.0;
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
calc->TCOL[i][j]=0.0;
}
}
//gas->TREACG=0;
//gas->TREACL=0;
for(int i=0;i<5;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->TREACG[i][j]=0;
}
}
for(int i=0;i<5;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->TREACL[i][j]=0;
}
}
//memset(output->CS,0.0,sizeof(***output->CS));memset(output->CSS,0.0,sizeof(****output->CSS));memset(output->CSSS,0.0,sizeof(**output->CSSS));
for(int j=0;j<gas->MSP+10;j++){
for(int k=0;k<geom->NCELLS+1;k++){
for(int l=0;l<gas->MSP+1;l++)
output->CS[j][k][l]=0.0;
}
}
for(int i=0;i<9;i++){
for(int j=0;j<3;j++){
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<3;l++)
output->CSS[i][j][k][l]=0.0;
}
}
}
for(int k=0;k<7;k++){
for(int l=0;l<3;l++)
output->CSSS[k][l]=0.0;
}
//output->CS=0.0 ; output->CSS=0.0 ; output->CSSS=0.0;
//memset(output->VIBFRAC,0.e00,sizeof(***output->VIBFRAC));//output->VIBFRAC=0.e00;
//memset(output->SUMVIB,0.e00,sizeof(**output->SUMVIB));//output->SUMVIB=0.e00;
for(int j=0;j<gas->MSP+1;j++){
for(int k=0;k<gas->MMVM+1;k++){
for(int l=0;l<151;l++)
output->VIBFRAC[j][k][l]=0.0;
}
}
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<gas->MMVM+1;l++)
output->SUMVIB[k][l]=0.0;
}
}
////
//
void SET_INITIAL_STATE_1D()
{
//set the initial state of a homogeneous or one-dimensional flow
//
//MOLECS molecs;
//GEOM_1D geom;
//GAS gas;
//CALC calc;
//OUTPUT output;
//
//
int J,L,K,KK,KN,II,III,INC,NSET,NSC;
long long N,M;
double A,B,AA,BB,BBB,SN,XMIN,XMAX,WFMIN,DENG,ELTI,EA,XPREV;
double DMOM[4];
double VB[4][3];
double ROTE[3];
//
//NSET the alternative set numbers in the setting of exact initial state
//DMOM(N) N=1,2,3 for x,y and z momentum sums of initial molecules
//DENG the energy sum of the initial molecules
//VB alternative sets of velocity components
//ROTE alternative sets of rotational energy
//EA entry area
//INC counting increment
//ELTI initial electronic temperature
    //XPREV the previous x coordinate
//
//memset(DMOM,0.e00,sizeof(DMOM));
for(int i=0;i<4;i++)
DMOM[i]=0.e00;
DENG=0.e00;
//set the number of molecules, divisions etc. based on stream 1
//
calc->NMI=10000*calc->IMEG+2; //small changes in number for statistically independent runs
geom->NDIV=calc->NMI/calc->MOLSC; //MOLSC molecules per division
//WRITE (9,*) 'The number of divisions is',NDIV
file_9<< "The number of divisions is "<<geom->NDIV<<endl;
//
geom->MDIV=geom->NDIV;
geom->ILEVEL=0;
//
geom->i_allocate(geom->ILEVEL+1,geom->MDIV+1,geom->JDIV);
// ALLOCATE (JDIV(0:ILEVEL,MDIV),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR JDIV ARRAY',ERROR
// ENDIF
//
geom->DDIV=(geom->XB[2]-geom->XB[1])/double(geom->NDIV);
geom->NCELLS=geom->NDIV;
//WRITE (9,*) 'The number of sampling cells is',NCELLS
file_9<<"The number of sampling cells is "<< geom->NCELLS<<endl;
geom->NCIS=calc->MOLSC/calc->NMCC;
geom->NCCELLS=geom->NCIS*geom->NDIV;
//WRITE (9,*) 'The number of collision cells is',NCCELLS
file_9<< "The number of collision cells is "<<geom->NCCELLS<<endl;
//
if(geom->IFX == 0) geom->XS=0.e00;
//
if(calc->ISECS == 0){
if(geom->IFX == 0) calc->FNUM=((geom->XB[2]-geom->XB[1])*gas->FND[1])/double(calc->NMI);
if(geom->IFX == 1) calc->FNUM=PI*(pow(geom->XB[2],2)-pow(geom->XB[1],2))*gas->FND[1]/double(calc->NMI);
if(geom->IFX == 2) calc->FNUM=1.3333333333333333333333e00*PI*(pow(geom->XB[2],3)-pow(geom->XB[1],3))*gas->FND[1]/double(calc->NMI);
}
else{
if(geom->IFX == 0) calc->FNUM=((geom->XS-geom->XB[1])*gas->FND[1]+(geom->XB[2]-geom->XS)*gas->FND[2])/double(calc->NMI);
if(geom->IFX == 1) calc->FNUM=PI*((pow(geom->XS,2)-pow(geom->XB[1],2))*gas->FND[1]+(pow(geom->XB[2],2)-pow(geom->XS,2))*gas->FND[2])/double(calc->NMI);
if(geom->IFX == 2) calc->FNUM=1.3333333333333333333333e00*PI*((pow(geom->XS,3)-pow(geom->XB[1],3))*gas->FND[1]+(pow(geom->XB[2],3)-pow(geom->XS,3))*gas->FND[2])/double(calc->NMI);
}
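    //--FNUM is the number of real molecules represented by each simulated molecule: the
    //--initial number of real molecules in the domain (per unit cross-section for IFX=0,
    //--per unit length of the full cylinder for IFX=1, or in the full sphere for IFX=2)
    //--divided by the initial simulated number NMI; it is scaled below by the factor FNUMF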
//
calc->FNUM=calc->FNUM*calc->FNUMF;
if(calc->FNUM < 1.e00) calc->FNUM=1.e00;
//
calc->FTIME=0.e00;
//
calc->TOTMOV=0.e00;
calc->TOTCOL=0.e00;
output->NDISSOC=0;
//memset(calc->TCOL,0.e00,sizeof(**calc->TCOL));//calc->TCOL=0.e00;
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
calc->TCOL[i][j]=0.e00;
}
}
//memset(calc->TDISS,0.e00,sizeof(*calc->TDISS));//calc->TDISS=0.e00;
//memset(calc->TRECOMB,0.e00,sizeof(*calc->TRECOMB));//calc->TRECOMB=0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->TDISS[i]=0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->TRECOMB[i]=0.e00;
//gas->TREACG=0;
//gas->TREACL=0;
for(int i=0;i<5;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->TREACG[i][j]=0;
}
}
for(int i=0;i<5;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->TREACL[i][j]=0;
}
}
//memset(gas->TNEX,0.e00,sizeof(*gas->TNEX));//gas->TNEX=0.e00;
for(int i=0;i<gas->MEX+1;i++)
gas->TNEX[i]= 0.e00;
for(N=1;N<=geom->NDIV;N++){
geom->JDIV[0][N]=-N;
}
//
geom->d_allocate(5,geom->NCELLS+1,geom->CELL);
geom->i_allocate(geom->NCELLS+1,geom->ICELL);
geom->d_allocate(6,geom->NCCELLS+1,geom->CCELL);
geom->i_allocate(4,geom->NCCELLS+1,geom->ICCELL);
// ALLOCATE (CELL(4,NCELLS),ICELL(NCELLS),CCELL(5,NCCELLS),ICCELL(3,NCCELLS),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR CELL ARRAYS',ERROR
// ENDIF
//
output->d_allocate(geom->NCELLS+1,output->COLLS);
output->d_allocate(geom->NCELLS+1,output->WCOLLS);
output->d_allocate(geom->NCELLS+1,output->CLSEP);
output->d_allocate(gas->MNSR+1,output->SREAC);
output->d_allocate(24,geom->NCELLS+1,output->VAR);
output->d_allocate(13,geom->NCELLS+1,gas->MSP+1,output->VARSP);
output->d_allocate(36+gas->MSP,3,output->VARS);
output->d_allocate(10+gas->MSP,geom->NCELLS+1,gas->MSP+1,output->CS);
output->d_allocate(9,3,gas->MSP+1,3,output->CSS);
output->d_allocate(7,3,output->CSSS);
// ALLOCATE (COLLS(NCELLS),WCOLLS(NCELLS),CLSEP(NCELLS),SREAC(MNSR),VAR(23,NCELLS),VARSP(0:12,NCELLS,MSP), &
// VARS(0:35+MSP,2),CS(0:9+MSP,NCELLS,MSP),CSS(0:8,2,MSP,2),CSSS(6,2),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR SAMPLING ARRAYS',ERROR
// ENDIF
//
if(gas->MMVM >= 0){
output->d_allocate(gas->MSP+1,gas->MMVM+1,151,output->VIBFRAC);
output->d_allocate(gas->MSP+1,gas->MMVM+1,output->SUMVIB);
// ALLOCATE (VIBFRAC(MSP,MMVM,0:150),SUMVIB(MSP,MMVM),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR RECOMBINATION ARRAYS',ERROR
// ENDIF
}
//
INITIALISE_SAMPLES();
//
//Set the initial cells
for(N=1;N<=geom->NCELLS;N++){
geom->CELL[2][N]=geom->XB[1]+double(N-1)*geom->DDIV;
geom->CELL[3][N]=geom->CELL[2][N]+geom->DDIV;
geom->CELL[1][N]=geom->CELL[2][N]+0.5e00*geom->DDIV;
if(geom->IFX == 0) geom->CELL[4][N]=geom->CELL[3][N]-geom->CELL[2][N]; //calculation assumes unit cross-section
if(geom->IFX == 1) geom->CELL[4][N]=PI*(pow(geom->CELL[3][N],2)-pow(geom->CELL[2][N],2)); //assumes unit length of full cylinder
if(geom->IFX == 2) geom->CELL[4][N]=1.33333333333333333333e00*PI*(pow(geom->CELL[3][N],3)-pow(geom->CELL[2][N],3)); //flow is in the full sphere
geom->ICELL[N]=geom->NCIS*(N-1);
for(M=1;M<=geom->NCIS;M++){
L=geom->ICELL[N]+M;
XMIN=geom->CELL[2][N]+double(M-1)*geom->DDIV/double(geom->NCIS);
XMAX=XMIN+geom->DDIV/double(geom->NCIS);
if(geom->IFX == 0) geom->CCELL[1][L]=XMAX-XMIN;
if(geom->IFX == 1) geom->CCELL[1][L]=PI*(pow(XMAX,2)-pow(XMIN,2)); //assumes unit length of full cylinder
if(geom->IFX == 2) geom->CCELL[1][L]=1.33333333333333333333e00*PI*(pow(XMAX,3)-pow(XMIN,3)); //flow is in the full sphere
geom->CCELL[2][L]=0.e00;
geom->ICCELL[3][L]=N;
}
output->VAR[11][N]=gas->FTMP[1];
output->VAR[8][N]=gas->FTMP[1];
}
//
if(geom->IWF == 0) geom->AWF=1.e00;
if(geom->IWF == 1){
//FNUM must be reduced to allow for the weighting factors
A=0.e00;
B=0.e00;
for(N=1;N<=geom->NCELLS;N++){
A=A+geom->CELL[4][N];
B=B+geom->CELL[4][N]/(1.0+geom->WFM*pow(geom->CELL[1][N],geom->IFX));
}
geom->AWF=A/B;
calc->FNUM=calc->FNUM*B/A;
}
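    //--with radial weighting a simulated molecule at radius r represents FNUM*(1+WFM*r^IFX)
    //--real molecules, so FNUM is reduced by the volume-weighted mean of 1/(1+WFM*r^IFX)
    //--to keep the initial number of simulated molecules close to NMI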
//
//WRITE (9,*) 'FNUM is',FNUM
file_9<<"FNUM is "<<calc->FNUM<<endl;
//
//set the information on the molecular species
//
A=0.e00;
B=0.e00;
for(L=1;L<=gas->MSP;L++){
A=A+gas->SP[5][L]*gas->FSP[L][1];
B=B+(3.0+gas->ISPR[1][L])*gas->FSP[L][1];
gas->VMP[L][1]=sqrt(2.e00*BOLTZ*gas->FTMP[1]/gas->SP[5][L]);
if((geom->ITYPE[2]== 0) || (calc->ISECS == 1)) gas->VMP[L][2]=sqrt(2.e00*BOLTZ*gas->FTMP[2]/gas->SP[5][L]);
calc->VNMAX[L]=3.0*gas->VMP[L][1];
if(L == 1)
gas->VMPM=gas->VMP[L][1];
else
if(gas->VMP[L][1] > gas->VMPM) gas->VMPM=gas->VMP[L][1];
}
//WRITE (9,*) 'VMPM =',VMPM
file_9<< "VMPM = "<<gas->VMPM<<endl;
gas->FDEN=A*gas->FND[1];
gas->FPR=gas->FND[1]*BOLTZ*gas->FTMP[1];
gas->FMA=gas->VFX[1]/sqrt((B/(B+2.e00))*BOLTZ*gas->FTMP[1]/A);
    //set the molecular properties for collisions between unlike molecules
//to the average of the molecules
for(L=1;L<=gas->MSP;L++){
for(M=1;M<=gas->MSP;M++){
gas->SPM[4][L][M]=0.5e00*(gas->SP[1][L]+gas->SP[1][M]);
gas->SPM[3][L][M]=0.5e00*(gas->SP[3][L]+gas->SP[3][M]);
gas->SPM[5][L][M]=0.5e00*(gas->SP[2][L]+gas->SP[2][M]);
gas->SPM[1][L][M]=gas->SP[5][L]*(gas->SP[5][M]/(gas->SP[5][L]+gas->SP[5][M]));
gas->SPM[2][L][M]=0.25e00*PI*pow((gas->SP[1][L]+gas->SP[1][M]),2);
AA=2.5e00-gas->SPM[3][L][M];
A=tgamma(AA);
gas->SPM[6][L][M]=1.e00/A;
gas->SPM[8][L][M]=0.5e00*(gas->SP[4][L]+gas->SP[4][M]);
if((gas->ISPR[1][L] > 0) && (gas->ISPR[1][M] > 0))
gas->SPM[7][L][M]=(gas->SPR[1][L]+gas->SPR[1][M])*0.5e00;
if((gas->ISPR[1][L] > 0) && (gas->ISPR[1][M] == 0))
gas->SPM[7][L][M]=gas->SPR[1][L];
if((gas->ISPR[1][M] > 0) && (gas->ISPR[1][L] == 0))
gas->SPM[7][L][M]=gas->SPR[1][M];
}
}
if(gas->MSP == 1){ //set unscripted variables for the simple gas case
gas->RMAS=gas->SPM[1][1][1];
gas->CXSS=gas->SPM[2][1][1];
gas->RGFS=gas->SPM[6][1][1];
}
//
for(L=1;L<=gas->MSP;L++){
gas->CR[L]=0.e00;
for(M=1;M<=gas->MSP;M++){ //set the equilibrium collision rates
gas->CR[L]=gas->CR[L]+2.e00*SPI*pow(gas->SPM[4][L][M],2)*gas->FND[1]*gas->FSP[M][1]*pow((gas->FTMP[1]/gas->SPM[5][L][M]),(1.0-gas->SPM[3][L][M]))*sqrt(2.0*BOLTZ*gas->SPM[5][L][M]/gas->SPM[1][L][M]);
}
}
A=0.e00;
for(L=1;L<=gas->MSP;L++)
A=A+gas->FSP[L][1]*gas->CR[L];
gas->CTM=1.e00/A;
//WRITE (9,*) 'Collision time in the stream is',CTM
file_9<< "Collision time in the stream is "<<gas->CTM;
//
for(L=1;L<=gas->MSP;L++){
gas->FP[L]=0.e00;
for(M=1;M<=gas->MSP;M++){
gas->FP[L]=gas->FP[L]+PI*pow(gas->SPM[4][L][M],2)*gas->FND[1]*gas->FSP[M][1]*pow((gas->FTMP[1]/gas->SPM[5][L][M]),(1.0-gas->SPM[3][L][M]))*sqrt(1.e00+gas->SP[5][L]/gas->SP[5][M]);
}
gas->FP[L]=1.e00/gas->FP[L];
}
gas->FPM=0.e00;
for(L=1;L<=gas->MSP;L++)
gas->FPM=gas->FPM+gas->FSP[L][1]*gas->FP[L];
//WRITE (9,*) 'Mean free path in the stream is',FPM
file_9<<"Mean free path in the stream is "<<gas->FPM<<endl;
//
calc->TNORM=gas->CTM;
if(calc->ICLASS == 1) calc->TNORM= (geom->XB[2]-geom->XB[1])/gas->VMPM; //there may be alternative definitions
//
//set the initial time step
calc->DTM=gas->CTM*calc->CPDTM;
//
if(fabs(gas->VFX[1]) > 1.e-6)
A=(0.5e00*geom->DDIV/gas->VFX[1])*calc->TPDTM;
else
A=0.5e00*geom->DDIV/gas->VMPM;
if(geom->IVB == 1){
B=0.25e00*geom->DDIV/(fabs(geom->VELOB)+gas->VMPM);
if(B < A) A=B;
}
if(calc->DTM > A) calc->DTM=A;
//
calc->DTM=0.1e00*calc->DTM; //OPTIONAL MANUAL ADJUSTMENT that is generally used with a fixed time step (e.g for making x-t diagram)
//
calc->DTSAMP=calc->SAMPRAT*calc->DTM;
calc->DTOUT=calc->OUTRAT*calc->DTSAMP;
calc->TSAMP=calc->DTSAMP;
calc->TOUT=calc->DTOUT;
calc->ENTMASS=0.0;
//
//WRITE (9,*) 'The initial value of the overall time step is',DTM
file_9<< "The initial value of the overall time step is "<<calc->DTM<<endl;
//
//initialise cell quantities associated with collisions
//
for(N=1;N<=geom->NCCELLS;N++){
geom->CCELL[3][N]=calc->DTM/2.e00;
geom->CCELL[4][N]=2.e00*gas->VMPM*gas->SPM[2][1][1];
calc->RANF=((double)rand()/(double)RAND_MAX);
// RANDOM_NUMBER(RANF)
geom->CCELL[2][N]=calc->RANF;
geom->CCELL[5][N]=0.e00;
}
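    //--for each collision cell: CCELL(3,) holds the initial half time step, CCELL(4,) an
    //--initial estimate 2*VMPM*SPM(2,1,1) (twice the most probable speed times the reference
    //--cross-section) of the maximum (cross-section x relative speed) product used in the
    //--selection of candidate collision pairs, CCELL(2,) a random initial remainder in the
    //--pair count, and CCELL(5,) the collision cell time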
//
//set the entry quantities
//
for(K=1;K<=2;K++){
if((geom->ITYPE[K] == 0) || ((K == 2) && (geom->ITYPE[K] == 4))){
if(geom->IFX == 0) EA=1.e00;
if(geom->IFX == 1) EA=2.e00*PI*geom->XB[K];
if(geom->IFX == 2) EA=4.e00*PI*pow(geom->XB[K],2);
for(L=1;L<=gas->MSP;L++){
if(K == 1) SN=gas->VFX[1]/gas->VMP[L][1];
if(K == 2) SN=-gas->VFX[2]/gas->VMP[L][2];
AA=SN;
A=1.e00+erf(AA);
BB=exp(-pow(SN,2));
gas->ENTR[3][L][K]=SN;
gas->ENTR[4][L][K]=SN+sqrt(pow(SN,2)+2.e00);
gas->ENTR[5][L][K]=0.5e00*(1.e00+SN*(2.e00*SN-gas->ENTR[4][L][K]));
gas->ENTR[6][L][K]=3.e00*gas->VMP[L][K];
B=BB+SPI*SN*A;
gas->ENTR[1][L][K]=EA*gas->FND[K]*gas->FSP[L][K]*gas->VMP[L][K]*B/(calc->FNUM*2.e00*SPI);
gas->ENTR[2][L][K]=0.e00;
}
}
}
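    //--ENTR(1,L,K) is the number of simulated molecules of species L entering per unit time
    //--across boundary K: the one-sided number flux of the drifting Maxwellian, evaluated
    //--from the speed ratio SN as FND*FSP*VMP*[exp(-SN^2)+SPI*SN*(1+erf(SN))]/(2*SPI),
    //--multiplied by the entry area EA and divided by FNUM; ENTR(2,L,K) carries the
    //--fractional remainder between time steps and ENTR(3-6,L,K) hold constants used when
    //--sampling the inward normal velocity of an entering molecule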
//
//Set the uniform stream
//
molecs->MNM=1.1e00*calc->NMI;
//
if(gas->MMVM > 0){
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->d_allocate(molecs->MNM+1,molecs->PROT);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->i_allocate(gas->MMVM+1,molecs->MNM+1,molecs->IPVIB);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),PROT(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM), &
// IPVIB(MMVM,MNM),PELE(MNM),STAT=ERROR)
}
else{
if(gas->MMRM > 0){
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->d_allocate(molecs->MNM+1,molecs->PROT);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),PROT(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM),PELE(MNM),STAT=ERROR)
}
else{
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM),PELE(MNM),STAT=ERROR)
}
}
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR MOLECULE ARRAYS',ERROR
// ENDIF
//
molecs->NM=0;
if(calc->IGS == 1){
cout<<"Setting the initial gas"<<endl;
for(L=1;L<=gas->MSP;L++){
//memset(ROTE,0.0,sizeof(ROTE));
for(int i=0;i<3;i++)
ROTE[i]=0.0;
for(K=1;K<=calc->ISECS+1;K++){
if(calc->ISECS == 0){ //no secondary stream
M=(double(calc->NMI)*gas->FSP[L][1]*geom->AWF);
XMIN=geom->XB[1];
XMAX=geom->XB[2];
}
else{
A=(pow(geom->XS,geom->JFX)-pow(geom->XB[1],geom->JFX))*gas->FND[1]+(pow(geom->XB[2],geom->JFX)-pow(geom->XS,geom->JFX))*gas->FND[2];
if(K == 1){
M=int(double(calc->NMI)*((pow(geom->XS,geom->JFX)-pow(geom->XB[1],geom->JFX))*gas->FND[1]/A)*gas->FSP[L][1]);
XMIN=geom->XB[1];
XMAX=geom->XS;
}
else{
M=int(double(calc->NMI)*((pow(geom->XB[2],geom->JFX)-pow(geom->XS,geom->JFX))*gas->FND[2]/A)*gas->FSP[L][2]);
XMIN=geom->XS;
XMAX=geom->XB[2];
}
}
if((K == 1) || (calc->ISECS == 1)){
III=0;
WFMIN=1.e00+geom->WFM*pow(geom->XB[1],geom->IFX);
N=1;
INC=1;
if((K== 2) && (geom->JFX > 1)){
BBB=(pow(XMAX,geom->JFX)-pow(XMIN,geom->JFX))/double(M);
XPREV=XMIN;
}
while(N < M){
if((geom->JFX == 1) || (K == 1))
A=pow((pow(XMIN,geom->JFX)+((double(N)-0.5e00)/double(M))*pow((XMAX-XMIN),geom->JFX)),(1.e00/double(geom->JFX)));
else{
A=pow((pow(XPREV,geom->JFX)+BBB),(1.e00/double(geom->JFX)));
XPREV=A;
}
if(geom->IWF == 0)
B=1.e00;
else{
B=WFMIN/(1.e00+geom->WFM*pow(A,geom->IFX));
if((B < 0.1e00) && (INC == 1)) INC=10;
if((B < 0.01e00) && (INC == 10)) INC=100;
if((B < 0.001e00) && (INC == 100)) INC=1000;
if((B < 0.0001e00) && (INC == 1000)) INC=10000;
}
calc->RANF=((double)rand()/(double)RAND_MAX);
// CALL RANDOM_NUMBER(RANF)
if(B*double(INC) > calc->RANF){
molecs->NM=molecs->NM+1;
molecs->PX[1][molecs->NM]=A;
molecs->IPSP[molecs->NM]=L;
molecs->PTIM[molecs->NM]=0.0;
if(geom->IVB == 0) FIND_CELL_1D(molecs->PX[1][molecs->NM],molecs->IPCELL[molecs->NM],KK);
if(geom->IVB == 1) FIND_CELL_MB_1D(molecs->PX[1][molecs->NM],molecs->IPCELL[molecs->NM],KK,molecs->PTIM[molecs->NM]);
//
for(NSET=1;NSET<=2;NSET++){
for(KK=1;KK<=3;KK++){
RVELC(A,B,gas->VMP[L][K]);
if(A < B){
if(DMOM[KK] < 0.e00)
BB=B;
else
BB=A;
}
else{
if(DMOM[KK] < 0.e00)
BB=A;
else
BB=B;
}
VB[KK][NSET]=BB;
}
if(gas->ISPR[1][L] > 0) SROT(L,gas->FTMP[K],ROTE[NSET]);
}
A=(0.5e00*gas->SP[5][L]*(pow(VB[1][1],2)+pow(VB[2][1],2)+pow(VB[3][1],2))+ROTE[1])/(0.5e00*BOLTZ*gas->FTMP[K])-3.e00-double(gas->ISPR[1][L]);
B=(0.5e00*gas->SP[5][L]*(pow(VB[1][2],2)+pow(VB[2][2],2)+pow(VB[3][2],2))+ROTE[2])/(0.5e00*BOLTZ*gas->FTMP[K])-3.e00-double(gas->ISPR[1][L]);
if(A < B){
if(DENG < 0.e00)
KN=2;
else
KN=1;
}
else{
if(DENG < 0.e00)
KN=1;
else
KN=2;
}
for(KK=1;KK<=3;KK++){
molecs->PV[KK][molecs->NM]=VB[KK][KN];
DMOM[KK]=DMOM[KK]+VB[KK][KN];
}
molecs->PV[1][molecs->NM]=molecs->PV[1][molecs->NM]+gas->VFX[K];
molecs->PV[2][molecs->NM]=molecs->PV[2][molecs->NM]+gas->VFY[K];
if(gas->ISPR[1][L] > 0) molecs->PROT[molecs->NM]=ROTE[KN];
// PROT(NM)=0.d00 //uncomment for zero initial rotational temperature (Figs. 6.1 and 6.2)
if(KN == 1) DENG=DENG+A;
if(KN == 2) DENG=DENG+B;
if(gas->MMVM > 0){
if(gas->ISPV[L] > 0){
for(J=1;J<=gas->ISPV[L];J++)
SVIB(L,gas->FVTMP[K],molecs->IPVIB[J][molecs->NM],J);
}
ELTI=gas->FVTMP[K];
if(gas->MELE > 1) SELE(L,ELTI,molecs->PELE[molecs->NM]);
}
}
N=N+INC;
}
}
}
}
//
//WRITE (9,*) 'DMOM',DMOM
//WRITE (9,*) 'DENG',DENG
file_9<<"DMOM "<<DMOM<<endl;
file_9<<"DENG "<<DENG<<endl;
}
//
calc->NMI=molecs->NM;
//
    //SPECIAL CODING FOR INITIATION OF COMBUSTION IN H2-O2 MIXTURE (FORCED IGNITION CASES in section 6.7)
//set the vibrational levels of A% random molecules to 5
// A=0.05D00
// M=0.01D00*A*NM
// DO N=1,M
// CALL RANDOM_NUMBER(RANF)
// K=INT(RANF*DFLOAT(NM))+1
// IPVIB(1,K)=5
// END DO
//
SAMPLE_FLOW();
OUTPUT_RESULTS();
calc->TOUT=calc->TOUT-calc->DTOUT;
return;
}
void MOLECULES_ENTER_1D()
{
//molecules enter boundary at XB(1) and XB(2) and may be removed behind a wave
//MOLECS molecs;
//GAS gas;
//CALC calc;
//GEOM_1D geom;
//OUTPUT output;
//
int K,L,M,N,NENT,II,J,JJ,KK,NTRY;
double A,B,AA,BB,U,VN,XI,X,DX,DY,DZ;
//
//NENT number to enter in the time step
//
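    //--for each open boundary and species, the expected number of entering molecules in this
    //--time step is ENTR(1,L,J)*DTM plus the remainder carried over in ENTR(2,L,J); the
    //--inward normal velocity of each entering molecule is then sampled by acceptance-rejection
    //--from the flux-weighted (biased) Maxwellian using the constants in ENTR(3-5,L,J)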
calc->ENTMASS=0.e00;
//
for(J=1;J<=2;J++){ //J is the end
if((geom->ITYPE[J] == 0) || (geom->ITYPE[J] == 4)){
KK=1;//the entry surface will normally use the reference gas (main stream) properties
if((J == 2) && (calc->ISECS == 1) && (geom->XB[2] > 0.e00)) KK=2; //KK is 1 for reference gas 2 for the secondary stream
for(L=1;L<=gas->MSP;L++){
A=gas->ENTR[1][L][J]*calc->DTM+gas->ENTR[2][L][J];
if((geom->ITYPE[2] == 4) && (calc->ICN == 1)){
NENT=A;
if(J == 1) calc->EME[L]=NENT;
if(J == 2) {
A=calc->ALOSS[L]-calc->EME[L]-calc->AJM[L];
calc->AJM[L]=0.e00;
if(A < 0.e00){
calc->AJM[L]=-A;
A=0.e00;
}
}
}
NENT=A;
gas->ENTR[2][L][J]=A-NENT;
if((geom->ITYPE[2] == 4) && (J == 2) && (calc->ICN == 1)) gas->ENTR[2][L][J]=0.e00;
if(NENT > 0){
for(M=1;M<=NENT;M++){
if(molecs->NM >= molecs->MNM){
cout<< "EXTEND_MNM from MOLECULES_ENTER "<<endl;
EXTEND_MNM(1.1);
}
molecs->NM=molecs->NM+1;
AA=max(0.e00,gas->ENTR[3][L][J]-3.e00);
BB=max(3.e00,gas->ENTR[3][L][J]+3.e00);
II=0;
while(II == 0){
calc->RANF=((double)rand()/(double)RAND_MAX);
// CALL RANDOM_NUMBER(RANF)
B=AA+(BB-AA)*calc->RANF;
U=B-gas->ENTR[3][L][J];
A=(2.e00*B/gas->ENTR[4][L][J])*exp(gas->ENTR[5][L][J]-U*U);
calc->RANF=((double)rand()/(double)RAND_MAX);
// CALL RANDOM_NUMBER(RANF)
if(A > calc->RANF) II=1;
}
molecs->PV[1][molecs->NM]=B*gas->VMP[L][KK];
if(J == 2) molecs->PV[1][molecs->NM]=-molecs->PV[1][molecs->NM];
//
RVELC(molecs->PV[2][molecs->NM],molecs->PV[3][molecs->NM],gas->VMP[L][KK]);
molecs->PV[2][molecs->NM]=molecs->PV[2][molecs->NM]+gas->VFY[J];
//
if(gas->ISPR[1][L] > 0) SROT(L,gas->FTMP[KK],molecs->PROT[molecs->NM]);
//
if(gas->MMVM > 0){
for(K=1;K<=gas->ISPV[L];K++)
SVIB(L,gas->FVTMP[KK],molecs->IPVIB[K][molecs->NM],K);
}
if(gas->MELE > 1) SELE(L,gas->FTMP[KK],molecs->PELE[molecs->NM]);
//
                        //if(molecs->PELE[molecs->NM] > 0.e00) continue; //DEBUG
                        //--(the CONTINUE at this point in the Fortran original is a no-op; a C++ continue here would skip the rest of the molecule set-up)
//
molecs->IPSP[molecs->NM]=L;
//advance the molecule into the flow
calc->RANF=((double)rand()/(double)RAND_MAX);
// CALL RANDOM_NUMBER(RANF)
XI=geom->XB[J];
DX=calc->DTM*calc->RANF*molecs->PV[1][molecs->NM];
if((geom->IFX == 0) || (J == 2)) X=XI+DX;
if(J == 1){ //1-D move at outer boundary so molecule remains in flow
if(geom->IFX > 0) DY=calc->DTM*calc->RANF*molecs->PV[2][molecs->NM];
DZ=0.e00;
if(geom->IFX == 2) DZ=calc->DTM*calc->RANF*molecs->PV[3][molecs->NM];
if(geom->IFX > 0) AIFX(XI,DX,DY,DZ,X,molecs->PV[1][molecs->NM],molecs->PV[2][molecs->NM],molecs->PV[3][molecs->NM]);
}
molecs->PX[calc->NCLASS][molecs->NM]=X;
molecs->PTIM[molecs->NM]=calc->FTIME;
if(geom->IVB == 0) FIND_CELL_1D(molecs->PX[calc->NCLASS][molecs->NM],molecs->IPCELL[molecs->NM],JJ);
if(geom->IVB == 1) FIND_CELL_MB_1D(molecs->PX[calc->NCLASS][molecs->NM],molecs->IPCELL[molecs->NM],JJ,molecs->PTIM[molecs->NM]);
molecs->IPCP[molecs->NM]=0;
if(geom->XREM > geom->XB[1]) calc->ENTMASS=calc->ENTMASS+gas->SP[5][L];
}
}
}
if((geom->ITYPE[2] == 4) && (J==2) && (molecs->NM != calc->NMP) && (calc->ICN == 1))
continue;
}
}
//
//stagnation streamline molecule removal
if(geom->XREM > geom->XB[1]){
calc->ENTMASS=geom->FREM*calc->ENTMASS;
NTRY=0;
calc->ENTMASS=calc->ENTMASS+calc->ENTREM;
while((calc->ENTMASS > 0.e00) && (NTRY < 10000)){
NTRY=NTRY+1;
if(NTRY == 10000){
cout<<"Unable to find molecule for removal"<<endl;
calc->ENTMASS=0.e00;
//memset(calc->VNMAX,0.e00,sizeof(*calc->VNMAX));//calc->VNMAX=0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->VNMAX[i]=0.e00;
}
calc->RANF=((double)rand()/(double)RAND_MAX);
// CALL RANDOM_NUMBER(RANF)
N=molecs->NM*calc->RANF+0.9999999e00;
if(molecs->PX[calc->NCLASS][N] > geom->XREM){
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
//IF (RANF < ((PX(N)-XREM)/(XB(2)-XREM))**2) THEN
if(fabs(gas->VFY[1]) < 1.e-3)
VN=sqrt(molecs->PV[2][N]*molecs->PV[2][N]+molecs->PV[3][N]*molecs->PV[3][N]); //AXIALLY SYMMETRIC STREAMLINE
else
VN=fabs(molecs->PV[3][N]); //TWO-DIMENSIONAL STREAMLINE
L=molecs->IPSP[N];
if(VN > calc->VNMAX[L]) calc->VNMAX[L]=VN;
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF < VN/calc->VNMAX[L]){
REMOVE_MOL(N);
calc->ENTMASS=calc->ENTMASS-gas->SP[5][L];
NTRY=0;
}
//END IF
}
}
calc->ENTREM=calc->ENTMASS;
}
}
void FIND_CELL_1D(double &X,int &NCC,int &NSC)
{
    //find the collision and sampling cells at a given location in a 0D or 1D case
//MOLECS molecs;
//GEOM_1D geom;
//CALC calc;
int N,L,M,ND;
double FRAC,DSC;
//
//NCC collision cell number
//NSC sampling cell number
//X location
//ND division number
//DSC the ratio of the sub-division width to the division width
//
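    //--the division containing X is found first; a negative JDIV(0,ND) means the division is
    //--itself a sampling cell, otherwise the levels of subdivision are descended until a
    //--negative JDIV entry (a sampling cell) is reached; the collision cell is then located
    //--within the NCIS equal subdivisions of that sampling cell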
ND=(X-geom->XB[1])/geom->DDIV+0.99999999999999e00;
//
if(geom->JDIV[0][ND] < 0){ //the division is a level 0 (no sub-division) sampling cell
NSC=-geom->JDIV[0][ND];
// IF (IFX == 0)
NCC=geom->NCIS*(X-geom->CELL[2][NSC])/(geom->CELL[3][NSC]-geom->CELL[2][NSC])+0.9999999999999999e00;
NCC=NCC+geom->ICELL[NSC];
// IF (NCC == 0) NCC=1
return;
}
else{ //the molecule is in a subdivided division
FRAC=(X-geom->XB[1])/geom->DDIV-double(ND-1);
M=ND;
for(N=1;N<=geom->ILEVEL;N++){
DSC=1.e00/double(N+1);
for(L=1;L<=2;L++){ //over the two level 1 subdivisions
                if(((L == 1) && (FRAC < DSC)) || ((L == 2) && (FRAC >= DSC))){
M=geom->JDIV[N-1][M]+L; //the address in JDIV
if(geom->JDIV[N][M] < 0){
NSC=-geom->JDIV[N][M];
NCC=geom->NCIS*(X-geom->CELL[2][NSC])/(geom->CELL[3][NSC]-geom->CELL[2][NSC])+0.999999999999999e00;
if(NCC == 0) NCC=1;
NCC=NCC+geom->ICELL[NSC];
return;
}
}
}
FRAC=FRAC-DSC;
}
}
file_9<<"No cell for molecule at x= "<<X<<endl;
return ;
}
void FIND_CELL_MB_1D(double &X,int &NCC,int &NSC,double &TIM)
{
    //find the collision and sampling cells at a given location in a 0D or 1D case
//when there is a moving boundary
//MOLECS molecs;
//GEOM_1D geom;
//CALC calc;
//
// IMPLICIT NONE
//
int N,L,M,ND;
double FRAC,DSC,A,B,C;
//
//NCC collision cell number
//NSC sampling cell number
//X location
//ND division number
//DSC the ratio of the sub-division width to the division width
//TIM the time
//
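    //--with a moving outer boundary the division width is recomputed from the instantaneous
    //--boundary position XB(2)+VELOB*TIM, and every division is assumed to be a level 0
    //--sampling cell (no subdivision), so the cell indices follow directly from the position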
A=(geom->XB[2]+geom->VELOB*TIM-geom->XB[1])/double(geom->NDIV); //new DDIV
ND=(X-geom->XB[1])/A+0.99999999999999e00;
B=geom->XB[1]+double(ND-1)*A;
//
//the division is a level 0 sampling cell
NSC=-geom->JDIV[0][ND];
    NCC=geom->NCIS*(X-B)/A+0.99999999999999e00;
    NCC=NCC+geom->ICELL[NSC];
    return;
    //--the 'No cell' diagnostic below would otherwise be written on every call; it is retained only as a comment
    //WRITE (9,*) 'No cell for molecule at x=',X
    //file_9<< "No cell for molecule at x= "<<X<<endl;
//
}
void RVELC(double &U,double &V,double &VMP)
{
//CALC calc;
//generates two random velocity components U and V in an equilibrium
//gas with most probable speed VMP
//based on equations (4.4) and (4.5)
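    //--standard polar (Box-Muller type) sampling: A=sqrt(-ln(RANF)) is a thermal speed in
    //--units of VMP and B a random phase angle, so the sine and cosine projections give two
    //--independent equilibrium velocity components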
double A,B;
//
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
A=sqrt(-log(calc->RANF));
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
B=DPI*calc->RANF;
U=A*sin(B)*VMP;
V=A*cos(B)*VMP;
return;
}
void SROT(int &L,double &TEMP,double &ROTE)
{
//sets a typical rotational energy ROTE of species L
//CALC calc;
//GAS gas;
//
// IMPLICIT NONE
//
int I;
double A,B,ERM;
//
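    //--for two rotational degrees of freedom the energy is sampled directly from the
    //--exponential (Boltzmann) distribution; otherwise acceptance-rejection is applied to the
    //--normalised distribution (ERM/A)^A*exp(A-ERM), with ERM the energy in units of kT and
    //--a cut-off at 10 kT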
if(gas->ISPR[1][L] == 2){
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
ROTE=-log(calc->RANF)*BOLTZ*TEMP; //equation (4.8)
}
else{
A=0.5e00*gas->ISPR[1][L]-1.e00;
I=0;
while(I == 0){
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
ERM=calc->RANF*10.e00;
//there is an energy cut-off at 10 kT
B=(pow((ERM/A),A))*exp(A-ERM); //equation (4.9)
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
if(B > calc->RANF) I=1;
}
ROTE=ERM*BOLTZ*TEMP;
}
return;
}
void SVIB(int &L,double &TEMP,int &IVIB, int &K)
{
//sets a typical vibrational state at temp. TEMP of mode K of species L
//GAS gas;
//CALC calc;
//
// IMPLICIT NONE
//
int N;
// double TEMP;
// int IVIB;
//
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
N=-log(calc->RANF)*TEMP/gas->SPVM[1][K][L]; //eqn(4.10)
//the state is truncated to an integer
IVIB=N;
}
void SELE(int &L,double &TEMP, double &ELE)
{
//sets a typical electronic energy at temp. TEMP of species L
//employs direct sampling from the Boltzmann distribution
//GAS gas;
//CALC calc;
//
// IMPLICIT NONE
//
int K,N;
double EPF,A,B;
double CTP[20];
//
//ELE electronic energy of a molecule
//EPF electronic partition function
//CTP(N) contribution of electronic level N to the electronic partition function
//
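    //--a level is selected by comparing a single random fraction with the cumulative sum of
    //--the Boltzmann factors QELC(1,N,L)*exp(-QELC(2,N,L)/TEMP) normalised by the electronic
    //--partition function EPF; the energy returned is BOLTZ times the characteristic
    //--temperature of the selected level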
if(TEMP > 0.1){
EPF=0.e00;
for(N=1;N<=gas->NELL[L];N++)
EPF=EPF+gas->QELC[1][N][L]*exp(-gas->QELC[2][N][L]/TEMP) ;
//
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
//
A=0.0;
K=0; //becomes 1 when the energy is set
N=0; //level
while(K == 0){
N=N+1;
A=A+gas->QELC[1][N][L]*exp(-gas->QELC[2][N][L]/TEMP);
B=A/EPF;
if(calc->RANF < B){
K=1;
ELE=BOLTZ*gas->QELC[2][N][L];
}
}
}
else
ELE=0.e00;
//
}
void CQAX(double &A,double &X,double &GAX)
{
//calculates the function Q(a,x)=Gamma(a,x)/Gamma(a)
//
// IMPLICIT NONE
double G,DT,T,PV,V;
int NSTEP,N;
//
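    //--for X<10 the incomplete gamma ratio is obtained by trapezoidal integration of
    //--t^(A-1)*exp(-t) over (0,X) and subtraction from 1; for larger X a four-term asymptotic
    //--expansion is used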
G=tgamma(A);
//
if(X < 10.e00){ //direct integration
NSTEP=100000;
DT=X/double(NSTEP);
GAX=0.e00;
PV=0.e00;
for(N=1;N<=NSTEP;N++){
T=double(N)*DT;
V=exp(-T)*pow(T,(A-1));
GAX=GAX+(PV+V)*DT/2.e00;
PV=V;
}
GAX=1.e00-GAX/G;
}
else{ //asymptotic formula
GAX=pow(X,(A-1.e00))*exp(-X)*(1.0+(A-1.e00)/X+(A-1.e00)*(A-2.e00)/pow(X,2)+(A-1.e00)*(A-2.e00)*(A-3.e00)/pow(X,3)+(A-1.e00)*(A-2.e00)*(A-3.e00)*(A-4.e00)/pow(X,4));
GAX=GAX/G;
}
//
return;
}
//*****************************************************************************
//
void LBS(double XMA,double XMB,double &ERM)
{
//selects a Larsen-Borgnakke energy ratio using eqn (11.9)
//
double PROB,RANF;
int I,N;
//
//I is an indicator
//PROB is a probability
//ERM ratio of rotational to collision energy
//XMA degrees of freedom under selection-1
//XMB remaining degrees of freedom-1
//
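    //--acceptance-rejection on the normalised Larsen-Borgnakke distribution
    //--  P = [((XMA+XMB)/XMA)*ERM]^XMA * [((XMA+XMB)/XMB)*(1-ERM)]^XMB
    //--with the degenerate single-mode forms used when XMA or XMB is effectively zero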
I=0;
while(I == 0){
// CALL RANDOM_NUMBER(RANF)
RANF=((double)rand()/(double)RAND_MAX);
ERM=RANF;
if((XMA < 1.e-6) || (XMB < 1.e-6)){
// IF (XMA < 1.E-6.AND.XMB < 1.E-6) RETURN
//above can never occur if one mode is translational
if(XMA < 1.e-6) PROB=pow((1.e00-ERM),XMB);
if(XMB < 1.e-6) PROB=pow((1.e00-ERM),XMA);
}
else
PROB=pow(((XMA+XMB)*ERM/XMA),XMA)*pow(((XMA+XMB)*(1.e00-ERM)/XMB),XMB);
// CALL RANDOM_NUMBER(RANF)
RANF=((double)rand()/(double)RAND_MAX);
if(PROB > RANF) I=1;
}
//
return;
}
void REFLECT_1D(int &N,int J,double &X)
{
//reflects molecule N and samples the surface J properties
//MOLECS molecs;
//GAS gas;
//GEOM_1D geom;
//CALC calc;
//OUTPUT output;
//
// IMPLICIT NONE
//
int L,K,M;
double A,B,VMPS,DTR,XI,DX,DY,DZ,WF;
//
//VMPS most probable velocity at the surface temperature
//DTR time remaining after molecule hits a surface
//
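    //--the incident fluxes of number, momentum and energy are accumulated first (CSS(,,,1));
    //--the molecule is then reflected specularly with probability FSPEC(J) or diffusely from
    //--a surface at temperature TSURF(J), and the reflected fluxes are accumulated (CSS(,,,2))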
L=molecs->IPSP[N];
WF=1.e00;
if(geom->IWF == 1) WF=1.e00+geom->WFM*pow(X,geom->IFX);
output->CSS[0][J][L][1]=output->CSS[0][J][L][1]+1.e00;
output->CSS[1][J][L][1]=output->CSS[1][J][L][1]+WF;
output->CSS[2][J][L][1]=output->CSS[2][J][L][1]+WF*molecs->PV[1][N]*gas->SP[5][L];
output->CSS[3][J][L][1]=output->CSS[3][J][L][1]+WF*(molecs->PV[2][N]-gas->VSURF[J])*gas->SP[5][L];
output->CSS[4][J][L][1]=output->CSS[4][J][L][1]+WF*molecs->PV[3][N]*gas->SP[5][L];
A=pow(molecs->PV[1][N],2)+pow((molecs->PV[2][N]-gas->VSURF[J]),2)+pow(molecs->PV[3][N],2);
output->CSS[5][J][L][1]=output->CSS[5][J][L][1]+WF*0.5e00*gas->SP[5][L]*A;
if(gas->ISPR[1][L] > 0) output->CSS[6][J][L][1]=output->CSS[6][J][L][1]+WF*molecs->PROT[N];
if(gas->MELE > 1) output->CSS[8][J][L][1]=output->CSS[8][J][L][1]+WF*molecs->PELE[N];
if(gas->MMVM > 0){
if(gas->ISPV[L] > 0){
for(K=1;K<=gas->ISPV[L];K++)
output->CSS[7][J][L][1]=output->CSS[7][J][L][1]+WF*double(molecs->IPVIB[K][N])*BOLTZ*gas->SPVM[1][K][L];
}
}
A=pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2)+pow(molecs->PV[3][N],2);
B=fabs(molecs->PV[1][N]);
output->CSSS[1][J]=output->CSSS[1][J]+WF/B;
output->CSSS[2][J]=output->CSSS[2][J]+WF*gas->SP[5][L]/B;
output->CSSS[3][J]=output->CSSS[3][J]+WF*gas->SP[5][L]*molecs->PV[2][N]/B;
//this assumes that any flow normal to the x direction is in the y direction
output->CSSS[4][J]=output->CSSS[4][J]+WF*gas->SP[5][L]*A/B;
if(gas->ISPR[1][L] > 0){
output->CSSS[5][J]=output->CSSS[5][J]+WF*molecs->PROT[N]/B;
output->CSSS[6][J]=output->CSSS[6][J]+WF*gas->ISPR[1][L]/B;
}
//
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
if(gas->FSPEC[J] > calc->RANF){ //specular reflection
X=2.e00*geom->XB[J]-X;
molecs->PV[1][N]=-molecs->PV[1][N];
DTR=(X-geom->XB[J])/molecs->PV[1][N];
}
else{ //diffuse reflection
VMPS=sqrt(2.e00*BOLTZ*gas->TSURF[J]/gas->SP[5][L]);
DTR=(geom->XB[J]-molecs->PX[1][N])/molecs->PV[1][N];
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
molecs->PV[1][N]=sqrt(-log(calc->RANF))*VMPS;
if(J == 2) molecs->PV[1][N]=-molecs->PV[1][N];
RVELC(molecs->PV[2][N],molecs->PV[3][N],VMPS);
molecs->PV[2][N]=molecs->PV[2][N]+gas->VSURF[J];
if(gas->ISPR[1][L] > 0) SROT(L,gas->TSURF[J],molecs->PROT[N]);
if(gas->MMVM > 0){
for(K=1;K<=gas->ISPV[L];K++)
SVIB(L,gas->TSURF[J],molecs->IPVIB[K][N],K);
}
if(gas->MELE > 1) SELE(L,gas->TSURF[J],molecs->PELE[N]);
}
//
output->CSS[2][J][L][2]=output->CSS[2][J][L][2]-WF*molecs->PV[1][N]*gas->SP[5][L];
output->CSS[3][J][L][2]=output->CSS[3][J][L][2]-WF*(molecs->PV[2][N]-gas->VSURF[J])*gas->SP[5][L];
output->CSS[4][J][L][2]=output->CSS[4][J][L][2]-WF*molecs->PV[3][N]*gas->SP[5][L];
A=pow(molecs->PV[1][N],2)+pow((molecs->PV[2][N]-gas->VSURF[J]),2)+pow(molecs->PV[3][N],2);
output->CSS[5][J][L][2]=output->CSS[5][J][L][2]-WF*0.5e00*gas->SP[5][L]*A;
if(gas->ISPR[1][L] > 0) output->CSS[6][J][L][2]=output->CSS[6][J][L][2]-WF*molecs->PROT[N];
if(gas->MELE > 1) output->CSS[8][J][L][2]=output->CSS[8][J][L][2]-WF*molecs->PELE[N];
if(gas->MMVM > 0){
if(gas->ISPV[L] > 0){
for(K=1;K<=gas->ISPV[L];K++)
output->CSS[7][J][L][2]=output->CSS[7][J][L][2]-WF*double(molecs->IPVIB[K][N])*BOLTZ*gas->SPVM[1][K][L];
}
}
A=pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2)+pow(molecs->PV[3][N],2);
B=fabs(molecs->PV[1][N]);
output->CSSS[1][J]=output->CSSS[1][J]+WF/B;
output->CSSS[2][J]=output->CSSS[2][J]+WF*gas->SP[5][L]/B;
output->CSSS[3][J]=output->CSSS[3][J]+WF*gas->SP[5][L]*molecs->PV[2][N]/B;
//this assumes that any flow normal to the x direction is in the y direction
output->CSSS[4][J]=output->CSSS[4][J]+WF*gas->SP[5][L]*A/B;
if(gas->ISPR[1][L] > 0){
        output->CSSS[5][J]=output->CSSS[5][J]+WF*molecs->PROT[N]/B;
output->CSSS[6][J]=output->CSSS[6][J]+WF*gas->ISPR[1][L]/B;
}
//
XI=geom->XB[J];
DX=DTR*molecs->PV[1][N];
DZ=0.e00;
if(geom->IFX > 0) DY=DTR*molecs->PV[2][N];
if(geom->IFX == 2) DZ=DTR*molecs->PV[3][N];
if(geom->IFX == 0) X=XI+DX;
if(geom->IFX > 0) AIFX(XI,DX,DY,DZ,X,molecs->PV[1][N],molecs->PV[2][N],molecs->PV[3][N]);
//
return;
}
void RBC(double &XI, double &DX, double &DY,double &DZ, double &R,double &S)
{
//calculates the trajectory fraction S from a point at radius XI with
//note that the axis is in the y direction
//--displacements DX, DY, and DZ to a possible intersection with a
//--surface of radius R, IFX=1, 2 for cylindrical, spherical geometry
//MOLECS molecs;
//GAS gas;
//GEOM_1D geom;
//CALC calc;
//OUTPUT output;
//
// IMPLICIT NONE
//
double A,B,C,DD,S1,S2;
//
DD=DX*DX+DZ*DZ;
if(geom->IFX == 2) DD=DD+DY*DY;
B=XI*DX/DD;
C=(XI*XI-R*R)/DD;
A=B*B-C;
if(A >= 0.e00){
//find the least positive solution to the quadratic
A=sqrt(A);
S1=-B+A;
S2=-B-A;
if(S2 < 0.e00){
if(S1 > 0.e00)
S=S1;
else
S=2.e00;
}
else if(S1 < S2)
S=S1;
else
S=S2;
}
else
S=2.e00;
//setting S to 2 indicates that there is no intersection
return;
//
}
void AIFX(double &XI,double &DX, double &DY, double &DZ, double &X, double &U, double &V, double &W)
{
//
//calculates the new radius and realigns the velocity components in
//--cylindrical and spherical flows
//MOLECS molecs;
//GAS gas;
//GEOM_1D geom;
//CALC calc;
//OUTPUT output;
//
// IMPLICIT NONE
//
//INTEGER ::
double A,B,C,DR,VR,S;
//
if(geom->IFX == 1){
DR=DZ;
VR=W;
}
else if(geom->IFX == 2){
DR=sqrt(DY*DY+DZ*DZ);
VR=sqrt(V*V+W*W);
}
A=XI+DX;
X=sqrt(A*A+DR*DR);
S=DR/X;
C=A/X;
B=U;
U=B*C+VR*S;
W=-B*S+VR*C;
if(geom->IFX == 2){
VR=W;
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
A=DPI*calc->RANF;
V=VR*sin(A);
W=VR*cos(A);
}
//
return;
//
}
void REMOVE_MOL(int &N)
{
//remove molecule N and replaces it by NM
//MOLECS molecs;
//CALC calc;
//GEOM_1D geom;
//GAS gas;
// IMPLICIT NONE
//
int NC,M,K;
//N the molecule number
//M,K working integer
//
if(N != molecs->NM){
for(M=1;M<=calc->NCLASS;M++)
molecs->PX[M][N]=molecs->PX[M][molecs->NM];
for(M=1;M<=3;M++)
molecs->PV[M][N]=molecs->PV[M][molecs->NM];
if(gas->MMRM > 0) molecs->PROT[N]=molecs->PROT[molecs->NM];
molecs->IPCELL[N]=fabs(molecs->IPCELL[molecs->NM]);
molecs->IPSP[N]=molecs->IPSP[molecs->NM];
molecs->IPCP[N]=molecs->IPCP[molecs->NM];
if(gas->MMVM > 0){
for(M=1;M<=gas->MMVM;M++)
molecs->IPVIB[M][N]=molecs->IPVIB[M][molecs->NM];
}
if(gas->MELE > 1) molecs->PELE[N]=molecs->PELE[molecs->NM];
molecs->PTIM[N]=molecs->PTIM[molecs->NM];
}
molecs->NM=molecs->NM-1;
//
return;
//
}
void INDEX_MOLS()
{
//index the molecules to the collision cells
//MOLECS molecs;
//CALC calc;
//GEOM_1D geom;
// IMPLICIT NONE
//
int N,M,K;
//
//N,M,K working integer
//
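    //--a two-pass counting sort: the first pass counts the molecules in each collision cell
    //--to set the starting addresses ICCELL(1,), the second places each molecule number in
    //--the cross-reference array ICREF so that the molecules of a cell form a contiguous block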
for(N=1;N<=geom->NCCELLS;N++)
geom->ICCELL[2][N]=0;
//
if(molecs->NM != 0){
for(N=1;N<=molecs->NM;N++){
M=molecs->IPCELL[N];
geom->ICCELL[2][M]=geom->ICCELL[2][M]+1;
}
//
M=0;
for(N=1;N<=geom->NCCELLS;N++){
geom->ICCELL[1][N]=M;
M=M+geom->ICCELL[2][N];
geom->ICCELL[2][N]=0;
}
//
for(N=1;N<=molecs->NM;N++){
M=molecs->IPCELL[N];
geom->ICCELL[2][M]=geom->ICCELL[2][M]+1;
K=geom->ICCELL[1][M]+geom->ICCELL[2][M];
molecs->ICREF[K]=N;
}
//cin.get();
//
}
return;
}
void SAMPLE_FLOW()
{
//sample the flow properties
//MOLECS molecs;
//CALC calc;
//GEOM_1D geom;
//GAS gas;
//OUTPUT output;
//
// IMPLICIT NONE
//
int NC,NCC,LS,N,M,K,L,I,KV;
double A,TE,TT,WF;
//
//NC the sampling cell number
//NCC the collision cell number
//LS the species code
//N,M,K working integers
//TE total translational energy
//
output->NSAMP=output->NSAMP+1;
cout<<"Sample \t"<<output->NSAMP<<endl<<endl;
//WRITE (9,*) NM,'Mols. at sample',NSAMP
file_9<<molecs->NM<<" Mols. at sample "<<output->NSAMP<<endl;
//
for(N=1;N<=molecs->NM;N++){
NCC=molecs->IPCELL[N];
NC=geom->ICCELL[3][NCC];
WF=1.e00;
if(geom->IWF == 1) WF=1.e00+geom->WFM*pow(molecs->PX[1][N],geom->IFX);
if((NC > 0) && (NC <= geom->NCELLS)){
if(gas->MSP > 1)
LS=fabs(molecs->IPSP[N]);
else
LS=1;
output->CS[0][NC][LS]=output->CS[0][NC][LS]+1.e00;
output->CS[1][NC][LS]=output->CS[1][NC][LS]+WF;
for(M=1;M<=3;M++){
output->CS[M+1][NC][LS]=output->CS[M+1][NC][LS]+WF*molecs->PV[M][N];
output->CS[M+4][NC][LS]=output->CS[M+4][NC][LS]+WF*pow(molecs->PV[M][N],2);
}
if(gas->MMRM > 0) output->CS[8][NC][LS]=output->CS[8][NC][LS]+WF*molecs->PROT[N];
if(gas->MELE > 1) output->CS[9][NC][LS]=output->CS[9][NC][LS]+WF*molecs->PELE[N];
if(gas->MMVM > 0){
if(gas->ISPV[LS] > 0){
for(K=1;K<=gas->ISPV[LS];K++)
output->CS[K+9][NC][LS]=output->CS[K+9][NC][LS]+WF*double(molecs->IPVIB[K][N]);
}
}
}
else{
cout<<"Illegal sampling cell "<<NC<<" "<<NCC<<" for MOL "<<N<<" at "<<molecs->PX[1][N]<<endl;
return;
}
}
//
if(calc->FTIME > 0.5e00*calc->DTM) calc->TSAMP=calc->TSAMP+calc->DTSAMP;
//
return;
}
void ADAPT_CELLS_1D()
{
//adapt the sampling cells through the splitting of the divisions into successive levels
//the collision cells are divisions of the sampling cells
//MOLECS molecs;
//GAS gas;
//GEOM_1D geom;
//CALC calc;
//OUTPUT output;
//
// IMPLICIT NONE
//
int M,N,L,K,KK,I,J,JJ,MSEG,NSEG,NSEG1,NSEG2,MLEVEL;
double A,B,DDE,DCRIT;
int *KDIV,*NC;
int **ISD;
double *XMIN,*XMAX,*DRAT;
// INTEGER, ALLOCATABLE, DIMENSION(:) :: KDIV,NC
// INTEGER, ALLOCATABLE, DIMENSION(:,:) :: ISD
// REAL(KIND=8), ALLOCATABLE, DIMENSION(:) :: XMIN,XMAX,DRAT
//
//DCRIT the number density ratio that causes a cell to be subdivided
//KDIV(N) the number of divisions/subdivisions (cells or further subdivisions) at level N
//DRAT(N) the contribution to the density ratio of element N
//NC(I) the number of sampling cells at level I
//DDE the width of an element
//MSEG the maximum number of segments (a segment is the size of the smallest subdivision)
//NSEG1 the (first segment-1) in the subdivision
//NSEG2 the final segment in the subdivision
//ISD(N,M) 0,1 for cell,subdivided for level N subdivision
//MLEVEL The maximum desired level ILEVEL of subdivision (cells are proportional to 2**ILEVEL)
//
DCRIT=1.5e00; //may be altered
MLEVEL=2; //may be altered
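    //--the domain is scanned in segments of the finest allowed width; a division (or
    //--subdivision) becomes a sampling cell when the sum of its segment density ratios DRAT
    //--falls below DCRIT, otherwise it is split into two subdivisions at the next level,
    //--down to at most MLEVEL levels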
//
//determine the level to which the divisions are to be subdivided
//
A=1.e00;
for(N=1;N<=geom->NCELLS;N++)
if(output->VAR[3][N]/gas->FND[1] > A) A=output->VAR[3][N]/gas->FND[1];
geom->ILEVEL=0;
while(A > DCRIT){
geom->ILEVEL=geom->ILEVEL+1;
A=A/2.e00;
}
if(geom->ILEVEL > MLEVEL) geom->ILEVEL=MLEVEL;
//WRITE (9,*) 'ILEVEL =',ILEVEL
file_9<<"ILEVEL = "<<geom->ILEVEL<<endl;
NSEG=pow(2,geom->ILEVEL);
MSEG=geom->NDIV*NSEG;
//
KDIV = new int[geom->ILEVEL+1];
DRAT = new double[MSEG+1];
NC = new int[geom->ILEVEL+1];
ISD = new int*[geom->ILEVEL+1];
for(int i =0; i< (geom->ILEVEL+1); ++i)
ISD[i] = new int[MSEG+1];
// ALLOCATE (KDIV(0:ILEVEL),DRAT(MSEG),NC(0:ILEVEL),ISD(0:ILEVEL,MSEG),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR KDIV ARRAY',ERROR
// ENDIF
//
DDE=(geom->XB[2]-geom->XB[1])/double(MSEG);
for(N=1;N<=MSEG;N++){
A=geom->XB[1]+(double(N)-0.5e00)*DDE;
FIND_CELL_1D(A,M,L);
DRAT[N]=output->VAR[3][L]/(gas->FND[1]*double(NSEG));
}
//
    //calculate the number of subdivisions at the various levels of subdivision
    for(N=0;N<=geom->ILEVEL;N++)
        KDIV[N]=0;
    //also the number of sampling cells at each level
    for(N=0;N<=geom->ILEVEL;N++)
        NC[N]=0;
//
for(N=1;N<=geom->NDIV;N++){ //divisions
        for(I=0;I<=geom->ILEVEL;I++)
            for(M=1;M<=MSEG;M++) ISD[I][M]=0;
ISD[0][1]=1;
KDIV[0]=KDIV[0]+1;
// WRITE (9,*) 'DIVISION',N
for(I=0;I<=geom->ILEVEL;I++){ //level of subdivision
// WRITE (9,*) 'LEVEL',I
J=pow(2,I); //number of possible subdivisions at this level
JJ=NSEG/J; //number of segments in a subdivision
for(M=1;M<=J;M++){
// WRITE (9,*) 'SUBDIVISION',M
if(ISD[I][M] == 1){
NSEG1=(N-1)*NSEG+(M-1)*JJ+1;
NSEG2=NSEG1+JJ-1;
A=0.e00;
// WRITE (9,*) 'NSEG RANGE',NSEG1,NSEG2
for(L=NSEG1;L<=NSEG2;L++)
A=A+DRAT[L];
// WRITE (9,*) 'DENS CONTRIB',A
if(A < DCRIT){
NC[I]=NC[I]+1;
// WRITE (9,*) 'LEVEL',I,' CELLS TO', NC(I)
}
else{
KDIV[I+1]=KDIV[I+1]+2;
// WRITE (9,*) 'LEVEL',I+1,' SUBDIVISIONS TO',KDIV(I+1)
for(L=NSEG1-(N-1)*NSEG;L<=NSEG2-(N-1)*NSEG;L++)
ISD[I+1][L]=1;
}
}
}
}
}
//
    //WRITE (9,*) 'KDIV',KDIV
    file_9<<"KDIV ";
    for(N=0;N<=geom->ILEVEL;N++) file_9<<KDIV[N]<<" ";
    file_9<<endl;
    //
    //WRITE (9,*) 'NC',NC
    file_9<<"NC ";
    for(N=0;N<=geom->ILEVEL;N++) file_9<<NC[N]<<" ";
    file_9<<endl;
    //cin.get();
//WRITE (9,*) 'Number of divisions',NDIV
file_9<<"Number of divisions "<<geom->NDIV<<endl;
A=0;
geom->NCELLS=0;
for(N=0;N<=geom->ILEVEL;N++){
A=A+double(NC[N])/(pow(2.e00,N));
geom->NCELLS=geom->NCELLS+NC[N];
}
//WRITE (9,*) 'Total divisions from sampling cells',A
//WRITE (9,*) 'Adapted sampling cells',NCELLS
file_9<< "Total divisions from sampling cells "<<A<<endl;
file_9<< "Adapted sampling cells "<<geom->NCELLS<<endl;
geom->NCCELLS=geom->NCELLS*geom->NCIS;
//WRITE (9,*) 'Adapted collision cells',NCCELLS
file_9<< "Adapted collision cells "<<geom->NCCELLS<<endl;
//
for (int i = 0; i < geom->ILEVEL+1; i++) {
hipFree(geom->JDIV[i]); //delete [] geom->JDIV[i];
}
hipFree(geom->JDIV); //delete [] geom->JDIV; // <- because they won't exist anymore after this
for (int i = 0; i < 5; i++) {
hipFree(geom->CELL[i]); //delete [] geom->CELL[i];
}
hipFree(geom->CELL); //delete [] geom->CELL; // <- because they won't exist anymore after this
hipFree(geom->ICELL); //delete[] geom->ICELL;
for (int i = 0; i < 6; i++) {
hipFree(geom->CCELL[i]); //delete [] geom->CCELL[i];
}
hipFree(geom->CCELL); //delete [] geom->CCELL; // <- because they won't exist anymore after this
for (int i = 0; i < 4; i++) {
hipFree(geom->ICCELL[i]); //delete [] geom->ICCELL[i];
}
hipFree(geom->ICCELL); //delete [] geom->ICCELL; // <- because they won't exist anymore after this
hipFree(output->COLLS); //delete[] output->COLLS;
hipFree(output->WCOLLS); //delete[] output->WCOLLS;
hipFree(output->CLSEP); //delete[] output->CLSEP;
for (int i = 0; i < 24; i++) {
hipFree(output->VAR[i]); //delete [] output->VAR[i];
}
hipFree(output->VAR); //delete [] output->VAR; // <- because they won't exist anymore after this
for(int i = 0; i < 13; i++)
{
for(int j = 0; j < geom->NCELLS+1; j++)
{
hipFree(output->VARSP[i][j]); //delete [] output->VARSP[i][j];
}
hipFree(output->VARSP[i]); //delete [] output->VARSP[i];
}
hipFree(output->VARSP); //delete [] output->VARSP;
for(int i = 0; i < (10+gas->MSP); i++)
{
for(int j = 0; j < geom->NCELLS+1; j++)
{
hipFree(output->CS[i][j]); //delete [] output->CS[i][j];
}
hipFree(output->CS[i]); //delete [] output->CS[i];
}
hipFree(output->CS); //delete [] output->CS;
/*DEALLOCATE (JDIV,CELL,ICELL,CCELL,ICCELL,COLLS,WCOLLS,CLSEP,VAR,VARSP,CS,STAT=ERROR)
IF (ERROR /= 0) THEN
WRITE (*,*)'PROGRAM COULD NOT DEALLOCATE ARRAYS IN ADAPT',ERROR
END IF*/
//
for(N=0;N<=geom->ILEVEL;N++)
if(KDIV[N] > geom->MDIV) geom->MDIV=KDIV[N];
//
    geom->i_allocate(geom->ILEVEL+1,geom->MDIV+1, geom->JDIV);
// ALLOCATE (JDIV(0:ILEVEL,MDIV),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR JDIV ARRAY IN ADAPT',ERROR
// ENDIF
//
geom->d_allocate(5,geom->NCELLS+1, geom->CELL);
geom->i_allocate(geom->NCELLS+1, geom->ICELL);
geom->d_allocate(6, geom->NCCELLS+1, geom->CCELL);
geom->i_allocate(4, geom->NCCELLS+1,geom->ICCELL);
XMIN= new double[geom->NCCELLS+1];
XMAX = new double[geom->NCCELLS+1];
//
// ALLOCATE (CELL(4,NCELLS),ICELL(NCELLS),CCELL(5,NCCELLS),ICCELL(3,NCCELLS),XMIN(NCCELLS),XMAX(NCCELLS),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR CELL ARRAYS IN ADAPT',ERROR
// ENDIF
//
output->d_allocate(geom->NCELLS+1,output->COLLS);
output->d_allocate(geom->NCELLS+1, output->WCOLLS);
output->d_allocate(geom->NCELLS+1, output->CLSEP);
output->d_allocate(24, geom->NCELLS+1, output->VAR);
output->d_allocate(13,geom->NCELLS+1,gas->MSP+1, output->VARSP);
output->d_allocate(10+gas->MSP+1,geom->NCELLS+1,gas->MSP+1,output->CS);
// ALLOCATE (COLLS(NCELLS),WCOLLS(NCELLS),CLSEP(NCELLS),VAR(23,NCELLS),VARSP(0:12,NCELLS,MSP),CS(0:9+MSP,NCELLS,MSP),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR SAMPLING ARRAYS IN ADAPT',ERROR
// ENDIF
//
geom->NCCELLS=0;
geom->NCELLS=0;
//
//set the JDIV arrays and the sampling cells at the various levels of subdivision
    for(N=0;N<=geom->ILEVEL;N++)
        KDIV[N]=0;
    for(N=0;N<=geom->ILEVEL;N++)
        for(M=1;M<=geom->MDIV;M++) geom->JDIV[N][M]=0;
//
for(N=1;N<=geom->NDIV;N++){ //divisions
        for(I=0;I<=geom->ILEVEL;I++)
            for(M=1;M<=MSEG;M++) ISD[I][M]=0;
ISD[0][1]=1;
KDIV[0]=KDIV[0]+1;
for(I=0;I<=geom->ILEVEL;I++){ //level of subdivision
J=pow(2,I); //number of possible subdivisions at this level
JJ=NSEG/J; //number of segments in a subdivision
for(M=1;M<=J;M++){
if(ISD[I][M] == 1){
NSEG1=(N-1)*NSEG+(M-1)*JJ+1;
NSEG2=NSEG1+JJ-1;
A=0.e00;
for(L=NSEG1;L<=NSEG2;L++)
A=A+DRAT[L];
if(A < DCRIT){
geom->NCELLS=geom->NCELLS+1;
output->VAR[11][geom->NCELLS]=gas->FTMP[1];
XMIN[geom->NCELLS]=geom->XB[1]+double(NSEG1-1)*DDE;
XMAX[geom->NCELLS]=XMIN[geom->NCELLS]+double(NSEG2-NSEG1+1)*DDE;
//WRITE (9,*) NCELLS,I,' XMIN,XMAX',XMIN(NCELLS),XMAX(NCELLS)
file_9<< geom->NCELLS<<" "<<I<<" XMIN,XMAX "<<XMIN[geom->NCELLS]<<" , "<<XMAX[geom->NCELLS]<<endl;
geom->JDIV[I][KDIV[I]-(J-M)]=-geom->NCELLS;
// WRITE (9,*) 'JDIV(',I,',',KDIV(I)-(J-M),')=',-NCELLS
}
else{
geom->JDIV[I][KDIV[I]-(J-M)]=KDIV[I+1];
// WRITE (9,*) 'JDIV(',I,',',KDIV(I)-(J-M),')=',KDIV(I+1)
KDIV[I+1]=KDIV[I+1]+2;
for(L=NSEG1-(N-1)*NSEG;L<=NSEG2-(N-1)*NSEG;L++)
ISD[I+1][L]=1;
}
}
}
}
}
//
//set the other quantities associated with the sampling cells and the collision cells
//
geom->NCCELLS=0;
for(N=1;N<=geom->NCELLS;N++){
geom->CELL[1][N]=(XMIN[N]+XMAX[N])/2.e00;
geom->CELL[2][N]=XMIN[N];
geom->CELL[3][N]=XMAX[N];
if(geom->IFX == 0) geom->CELL[4][N]=XMAX[N]-XMIN[N]; //calculation assumes unit cross-section
if(geom->IFX == 1) geom->CELL[4][N]=PI*(pow(XMAX[N],2)-pow(XMIN[N],2));
if(geom->IFX == 2) geom->CELL[4][N]=1.33333333333333333333e00*PI*(pow(XMAX[N],3)-pow(XMIN[N],3));
geom->ICELL[N]=geom->NCCELLS;
for(M=1;M<=geom->NCIS;M++){
geom->NCCELLS=geom->NCCELLS+1;
geom->ICCELL[3][geom->NCCELLS]=N;
geom->CCELL[1][geom->NCCELLS]=geom->CELL[4][N]/double(geom->NCIS);
geom->CCELL[3][geom->NCCELLS]=calc->DTM/2.e00;
geom->CCELL[4][geom->NCCELLS]=2.e00*gas->VMPM*gas->SPM[2][1][1];
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
geom->CCELL[2][geom->NCCELLS]=calc->RANF;
geom->CCELL[5][geom->NCCELLS]=calc->FTIME;
}
}
//
//assign the molecules to the cells
//
for(N=1;N<=molecs->NM;N++){
FIND_CELL_1D(molecs->PX[1][N],molecs->IPCELL[N],JJ);
M=molecs->IPCELL[N];
}
//
//deallocate the local variables
for (int i = 0; i < geom->ILEVEL+1; i++) {
delete [] ISD[i];
}
delete [] ISD;
delete [] NC;
delete[] KDIV;
delete [] XMAX;
delete [] XMIN;
delete [] DRAT;
/*DEALLOCATE (KDIV,NC,ISD,XMIN,XMAX,DRAT,STAT=ERROR)
IF (ERROR /= 0) THEN
WRITE (*,*)'PROGRAM COULD NOT DEALLOCATE LOCAL ARRAYS IN ADAPT',ERROR
END IF*/
//
return;
}
void EXTEND_MNM(double FAC)
{ //
//the maximum number of molecules is increased by a specified factor
//the existing molecules are copied TO disk storage
//MOLECS molecs;
//CALC calc;
//GAS gas;
//
// IMPLICIT NONE
//
int M,N,MNMN;
fstream file_7;
// REAL :: FAC
//
//M,N working integers
//MNMN extended value of MNM
//FAC the factor for the extension
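    //--the existing molecule arrays are written to the scratch file EXTMOLS.SCR, deallocated,
    //--re-allocated at the extended size MNMN, zeroed, and then read back from the scratch file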
MNMN=FAC*molecs->MNM;
cout<< "Maximum number of molecules is to be extended from "<<molecs->MNM<<" to "<<MNMN<<endl;
cout<< "( if the additional memory is available //// )"<<endl;
file_7.open("EXTMOLS.SCR", ios::binary | ios::out);
if(file_7.is_open()){
cout<<"EXTMOLS.SCR is opened"<<endl;
}
else{
cout<<"EXTMOLS.SCR not opened"<<endl;
}
cout<<"Start write to disk storage"<<endl;
//OPEN (7,FILE='EXTMOLS.SCR',FORM='BINARY')
//WRITE (*,*) 'Start write to disk storage'
for(N=1;N<=molecs->MNM;N++){
if(gas->MMVM > 0){
file_7<<molecs->PX[calc->NCLASS][N]<<endl<<molecs->PTIM[N]<<endl<<molecs->PROT[N]<<endl;
for(M=1;M<=3;M++)
file_7<<molecs->PV[M][N]<<endl;
file_7<<molecs->IPSP[N]<<endl<<molecs->IPCELL[N]<<endl<<molecs->ICREF[N]<<endl<<molecs->IPCP[N]<<endl;
for(M=1;M<=gas->MMVM;M++)
file_7<<molecs->IPVIB[M][N]<<endl;
file_7<<molecs->PELE[N]<<endl;//WRITE (7) PX(NCLASS,N),PTIM(N),PROT(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),(IPVIB(M,N),M=1,MMVM),PELE(N)
}
else{
if(gas->MMRM > 0){
file_7<<molecs->PX[calc->NCLASS][N]<<endl<<molecs->PTIM[N]<<endl<<molecs->PROT[N]<<endl;
for(M=1;M<=3;M++)
file_7<<molecs->PV[M][N]<<endl;
file_7<<molecs->IPSP[N]<<endl<<molecs->IPCELL[N]<<endl<<molecs->ICREF[N]<<endl<<molecs->IPCP[N]<<endl<<molecs->PELE[N]<<endl;//WRITE (7) PX(NCLASS,N),PTIM(N),PROT(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),PELE(N)
}
else{
file_7<<molecs->PX[calc->NCLASS][N]<<endl<<molecs->PTIM[N]<<endl;
for(M=1;M<=3;M++)
file_7<<molecs->PV[M][N]<<endl;
file_7<<molecs->IPSP[N]<<endl<<molecs->IPCELL[N]<<endl<<molecs->ICREF[N]<<endl<<molecs->IPCP[N]<<endl<<molecs->PELE[N]<<endl;//WRITE (7) PX(NCLASS,N),PTIM(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),PELE(N)
}
}
}
cout<<"Disk write completed"<<endl;
// WRITE (*,*) 'Disk write completed'
// CLOSE (7)
file_7.close();
if(gas->MMVM > 0){
for(int i=0;i<calc->NCLASS+1;i++){
hipFree(molecs->PX[i]); //delete [] molecs->PX[i];
}
hipFree(molecs->PX); //delete [] molecs->PX;
hipFree(molecs->PTIM); //delete [] molecs->PTIM;
hipFree(molecs->PROT);
for(int i=0;i<4;i++){
hipFree(molecs->PV[i]); //delete [] molecs->PV[i];
}
hipFree(molecs->PV); //delete [] molecs->PV;
hipFree(molecs->IPSP);
hipFree(molecs->IPCELL);
hipFree(molecs->ICREF);
hipFree(molecs->IPCP);
hipFree(molecs->PELE);
for(int i=0;i<gas->MMVM;i++){
hipFree(molecs->IPVIB[i]); //delete [] molecs->IPVIB[i];
}
hipFree(molecs->IPVIB); //delete molecs->IPVIB;
// for(int i=0;i<calc->NCLASS+1;i++){
// delete [] molecs->PX[i];
// }
// delete [] molecs->PX;
// delete [] molecs->PTIM;
// delete [] molecs->PROT;
// for(int i=0;i<4;i++){
// delete [] molecs->PV[i];
// }
// delete [] molecs->PV;
// delete [] molecs->IPSP;
// delete [] molecs->IPCELL;
// delete [] molecs->ICREF;
// delete [] molecs->IPCP;
// delete [] molecs->PELE;
// for(int i=0;i<gas->MMVM;i++){
// delete [] molecs->IPVIB[i];
// }
// delete molecs->IPVIB;
//DEALLOCATE (PX,PTIM,PROT,PV,IPSP,IPCELL,ICREF,IPCP,IPVIB,PELE,STAT=ERROR)
}
else{
if(gas->MMRM > 0){
for(int i=0;i<calc->NCLASS+1;i++){
hipFree(molecs->PX[i]); //delete [] molecs->PX[i];
}
hipFree(molecs->PX); //delete [] molecs->PX;
hipFree(molecs->PTIM); //delete [] molecs->PTIM;
hipFree(molecs->PROT);
for(int i=0;i<4;i++){
hipFree(molecs->PV[i]); //delete [] molecs->PV[i];
}
hipFree(molecs->PV); //delete [] molecs->PV;
hipFree(molecs->IPSP);
hipFree(molecs->IPCELL);
hipFree(molecs->ICREF);
hipFree(molecs->IPCP);
hipFree(molecs->PELE);
// delete [] molecs->IPSP;
// delete [] molecs->IPCELL;
// delete [] molecs->ICREF;
// delete [] molecs->IPCP;
// delete [] molecs->PELE;//DEALLOCATE (PX,PTIM,PV,IPSP,IPCELL,ICREF,IPCP,PELE,STAT=ERROR)
// for(int i=0;i<calc->NCLASS+1;i++){
// delete [] molecs->PX[i];
// }
// delete [] molecs->PX;
// delete [] molecs->PTIM;
// delete [] molecs->PROT;
// for(int i=0;i<4;i++){
// delete [] molecs->PV[i];
// }
// delete [] molecs->PV;
// delete [] molecs->IPSP;
// delete [] molecs->IPCELL;
// delete [] molecs->ICREF;
// delete [] molecs->IPCP;
// delete [] molecs->PELE;
//DEALLOCATE (PX,PTIM,PROT,PV,IPSP,IPCELL,ICREF,IPCP,PELE,STAT=ERROR)
}
else{
for(int i=0;i<calc->NCLASS+1;i++){
hipFree(molecs->PX[i]); //delete [] molecs->PX[i];
}
hipFree(molecs->PX); //delete [] molecs->PX;
hipFree(molecs->PTIM); //delete [] molecs->PTIM;
for(int i=0;i<4;i++){
hipFree(molecs->PV[i]); //delete [] molecs->PV[i];
}
hipFree(molecs->PV); //delete [] molecs->PV;
hipFree(molecs->IPSP);
hipFree(molecs->IPCELL);
hipFree(molecs->ICREF);
hipFree(molecs->IPCP);
hipFree(molecs->PELE);
// delete [] molecs->IPSP;
// delete [] molecs->IPCELL;
// delete [] molecs->ICREF;
// delete [] molecs->IPCP;
// delete [] molecs->PELE;//DEALLOCATE (PX,PTIM,PV,IPSP,IPCELL,ICREF,IPCP,PELE,STAT=ERROR)
}
}
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT DEALLOCATE MOLECULES',ERROR
// ! STOP
// END IF
// !
if(gas->MMVM > 0){
molecs->d_allocate(calc->NCLASS+1,MNMN+1,molecs->PX);
molecs->d_allocate(MNMN+1,molecs->PTIM);
molecs->d_allocate(MNMN+1,molecs->PROT);
molecs->d_allocate(4,MNMN+1,molecs->PV);
molecs->i_allocate(MNMN+1,molecs->IPSP);
molecs->i_allocate(MNMN+1,molecs->IPCELL);
molecs->i_allocate(MNMN+1,molecs->ICREF);
molecs->i_allocate(MNMN+1,molecs->IPCP);
molecs->i_allocate(gas->MMVM+1,MNMN+1,molecs->IPVIB);
molecs->d_allocate(MNMN+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNMN),PTIM(MNMN),PROT(MNMN),PV(3,MNMN),IPSP(MNMN),IPCELL(MNMN),ICREF(MNMN),IPCP(MNMN),IPVIB(MMVM,MNMN),PELE(MNMN),STAT=ERROR)
}
else{
if(gas->MMRM > 0){
molecs->d_allocate(calc->NCLASS+1,MNMN+1,molecs->PX);
molecs->d_allocate(MNMN+1,molecs->PTIM);
molecs->d_allocate(MNMN+1,molecs->PROT);
molecs->d_allocate(4,MNMN+1,molecs->PV);
molecs->i_allocate(MNMN+1,molecs->IPSP);
molecs->i_allocate(MNMN+1,molecs->IPCELL);
molecs->i_allocate(MNMN+1,molecs->ICREF);
molecs->i_allocate(MNMN+1,molecs->IPCP);
molecs->d_allocate(MNMN+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNMN),PTIM(MNMN),PROT(MNMN),PV(3,MNMN),IPSP(MNMN),IPCELL(MNMN),ICREF(MNMN),IPCP(MNMN),PELE(MNMN),STAT=ERROR)
}
else{
molecs->d_allocate(calc->NCLASS+1,MNMN+1,molecs->PX);
molecs->d_allocate(MNMN+1,molecs->PTIM);
molecs->d_allocate(4,MNMN+1,molecs->PV);
molecs->i_allocate(MNMN+1,molecs->IPSP);
molecs->i_allocate(MNMN+1,molecs->IPCELL);
molecs->i_allocate(MNMN+1,molecs->ICREF);
molecs->i_allocate(MNMN+1,molecs->IPCP);
molecs->d_allocate(MNMN+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNMN),PTIM(MNMN),PV(3,MNMN),IPSP(MNMN),IPCELL(MNMN),ICREF(MNMN),IPCP(MNMN),PELE(MNMN),STAT=ERROR)
}
}
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT ALLOCATE SPACE FOR EXTEND_MNM',ERROR
// ! STOP
// END IF
// !
//memset(molecs->PX,0.0,sizeof(**molecs->PX)); memset(molecs->PTIM,0.0,sizeof(*molecs->PTIM)); memset(molecs->PV,0.0,sizeof(**molecs->PV)); memset(molecs->IPSP,0,sizeof(*molecs->IPSP)); memset(molecs->IPCELL,0,sizeof(*molecs->IPCELL)); memset(molecs->ICREF,0,sizeof(*molecs->ICREF)); memset(molecs->IPCP,0,sizeof(*molecs->IPCP)); memset(molecs->PELE,0,sizeof(*molecs->PELE));
for(int i=0;i<calc->NCLASS+1;i++){
for(int j=0;j<MNMN+1;j++)
molecs->PX[i][j]=0.0;
}
for(int i=0;i<4;i++){
for(int j=0;j<MNMN+1;j++)
molecs->PV[i][j]=0.0;
}
for(int i=0;i<MNMN+1;i++){
molecs->PTIM[i]=0.0;
molecs->IPSP[i]=0;
molecs->IPCELL[i]=0;
molecs->ICREF[i]=0;
molecs->IPCP[i]=0;
molecs->PELE[i]=0;
}
if(gas->MMRM > 0) {
for(int i=0;i<MNMN+1;i++)
molecs->PROT[i]=0.0;
//memset(molecs->PROT,0.0,sizeof(*molecs->PROT));
}
if(gas->MMVM > 0) {
for(int i=0;i<gas->MMVM+1;i++){
for(int j=0;j<MNMN+1;j++)
molecs->IPVIB[i][j]=0;
}
//memset(molecs->IPVIB,0,sizeof(**molecs->IPVIB));
}
//restore the original molecules
// OPEN (7,FILE='EXTMOLS.SCR',FORM='BINARY')
// WRITE (*,*) 'Start read back from disk storage'
file_7.open("EXTMOLS.SCR", ios::binary | ios::in);
if(file_7.is_open()){
cout<<"EXTMOLS.SCR is opened"<<endl;
}
else{
cout<<"EXTMOLS.SCR not opened"<<endl;
}
for(N=1;N<=molecs->MNM;N++){
if(gas->MMVM > 0){
file_7>>molecs->PX[calc->NCLASS][N]>>molecs->PTIM[N]>>molecs->PROT[N];
for(M=1;M<=3;M++)
file_7>>molecs->PV[M][N];
file_7>>molecs->IPSP[N]>>molecs->IPCELL[N]>>molecs->ICREF[N]>>molecs->IPCP[N];
for(M=1;M<=gas->MMVM;M++)
file_7>>molecs->IPVIB[M][N];
file_7>>molecs->PELE[N];//READ (7) PX(NCLASS,N),PTIM(N),PROT(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),(IPVIB(M,N),M=1,MMVM),PELE(N)
}
else{
if(gas->MMRM > 0){
file_7>>molecs->PX[calc->NCLASS][N]>>molecs->PTIM[N]>>molecs->PROT[N];
for(M=1;M<=3;M++)
file_7>>molecs->PV[M][N];
file_7>>molecs->IPSP[N]>>molecs->IPCELL[N]>>molecs->ICREF[N]>>molecs->IPCP[N]>>molecs->PELE[N];//READ (7) PX(NCLASS,N),PTIM(N),PROT(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),PELE(N)
}
else{
file_7>>molecs->PX[calc->NCLASS][N]>>molecs->PTIM[N];
for(M=1;M<=3;M++)
file_7>>molecs->PV[M][N];
file_7>>molecs->IPSP[N]>>molecs->IPCELL[N]>>molecs->ICREF[N]>>molecs->IPCP[N]>>molecs->PELE[N];//READ (7) PX(NCLASS,N),PTIM(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),PELE(N)
}
}
}
cout<<"Disk read completed"<<endl;
// WRITE (*,*) 'Disk read completed'
// CLOSE (7,STATUS='DELETE')
file_7.close();
//
molecs->MNM=MNMN;
//
return;
}
void DISSOCIATION()
{
//dissociate diatomic molecules that have been marked for dissociation by -ve level or -99999 for ground state
//MOLECS molecs;
//GAS gas;
//CALC calc;
//
// IMPLICIT NONE
//
int K,KK,L,N,M,LS,MS,KV,IDISS;
double A,B,C,EA,VRR,VR,RMM,RML;
double VRC[4],VCM[4],VRCP[4];
//
N=0;
while(N < molecs->NM){
N=N+1;
IDISS=0;
L=molecs->IPSP[N];
if(gas->ISPV[L] > 0){
for(K=1;K<=gas->ISPV[L];K++){
M=molecs->IPVIB[K][N];
if(M < 0){
//dissociation
calc->TDISS[L]=calc->TDISS[L]+1.e00;
IDISS=1;
}
}
if(IDISS == 1){
EA=molecs->PROT[N]; //EA is energy available for relative translational motion of atoms
if(gas->MELE > 1) EA=EA+molecs->PELE[N];
if(molecs->NM >= molecs->MNM) EXTEND_MNM(1.1);
molecs->NM=molecs->NM+1;
//set center of mass velocity as that of molecule
VCM[1]=molecs->PV[1][N];
VCM[2]=molecs->PV[2][N];
VCM[3]=molecs->PV[3][N];
molecs->PX[calc->NCLASS][molecs->NM]=molecs->PX[calc->NCLASS][N];
molecs->IPCELL[molecs->NM]=molecs->IPCELL[N];
LS=molecs->IPSP[N];
gas->TREACL[1][LS]=gas->TREACL[1][LS]-1;
molecs->IPSP[molecs->NM]=gas->ISPVM[1][1][L];
MS=molecs->IPSP[molecs->NM];
molecs->IPSP[N]=gas->ISPVM[2][1][L];
LS=molecs->IPSP[N];
gas->TREACG[1][LS]=gas->TREACG[1][LS]+1;
gas->TREACG[1][MS]=gas->TREACG[1][MS]+1;
molecs->PTIM[molecs->NM]=molecs->PTIM[N];
VRR=2.e00*EA/gas->SPM[1][LS][MS];
VR=sqrt(VRR);
RML=gas->SPM[1][LS][MS]/gas->SP[5][MS];
RMM=gas->SPM[1][LS][MS]/gas->SP[5][LS];
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
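                    //(note: rand()/RAND_MAX spans [0,1] inclusive, unlike Fortran RANDOM_NUMBER's [0,1);
                    // the difference is negligible here but worth flagging wherever RANF is used as a probability)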
B=2.e00*calc->RANF-1.e00;
A=sqrt(1.e00-B*B);
VRCP[1]=B*VR;
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
C=2.e00*PI*calc->RANF;
VRCP[2]=A*cos(C)*VR;
VRCP[3]=A*sin(C)*VR;
for(KK=1;KK<=3;KK++){
molecs->PV[KK][N]=VCM[KK]+RMM*VRCP[KK];
molecs->PV[KK][molecs->NM]=VCM[KK]-RML*VRCP[KK];
}
if((fabs(molecs->PV[1][N]) > 100000.e00) || (fabs(molecs->PV[1][molecs->NM]) > 100000.e00)) {
cout<< "EXCESSIVE SPEED, DISS "<< N<< " "<<molecs->PV[1][N]<<" "<<molecs->NM<<" "<<molecs->PV[1][molecs->NM]<<endl;
}
//set any internal modes to the ground state
if(gas->ISPV[LS] > 0){
for(KV=1;KV<=gas->ISPV[LS];KV++)
molecs->IPVIB[KV][N]=0;
}
if(gas->ISPR[1][LS] > 0) molecs->PROT[N]=0.e00;
if(gas->MELE > 1) molecs->PELE[N]=0.e00;
if(gas->ISPV[MS] > 0){
for(KV=1;KV<=gas->ISPV[MS];KV++)
molecs->IPVIB[KV][molecs->NM]=0;
}
if(gas->ISPR[1][MS] > 0) molecs->PROT[molecs->NM]=0.0;
if(gas->MELE > 1) molecs->PELE[molecs->NM]=0.e00;
}
}
}
return;
}
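//--a minimal, self-contained sketch (not part of the original program) of the isotropic
//--scattering used above to set the post-dissociation relative velocity: B is a cosine
//--uniform in [-1,1], C a uniform azimuth, and the components multiply the relative speed VR.
//--ISOTROPIC_REL_VEL is a hypothetical helper name introduced only for illustration; it
//--assumes the file-level PI constant and the same rand()-based RANF convention used above.
static void ISOTROPIC_REL_VEL(double VR, double VRCP[4])  //VRCP[1..3] used, matching the 1-based style
{
    double RANF=((double)rand()/(double)RAND_MAX);
    double B=2.e00*RANF-1.e00;          //cosine of the polar angle
    double A=sqrt(1.e00-B*B);           //sine of the polar angle
    RANF=((double)rand()/(double)RAND_MAX);
    double C=2.e00*PI*RANF;             //azimuth angle
    VRCP[1]=B*VR;
    VRCP[2]=A*cos(C)*VR;
    VRCP[3]=A*sin(C)*VR;
}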
//************************************************************************************
//
void ENERGY(int I,double &TOTEN)
{
//calculate the total energy (all molecules if I=0, otherwise molecule I)
    //I>0 used for diagnostic purposes only
//MOLECS molecs;
//GAS gas;
//CALC calc;
//
// IMPLICIT NONE
//
int K,L,N,II,M,IV,KV,J;
double TOTENI,TOTELE;
//
TOTEN=0.0;
TOTELE=0;
//
if(I == 0){
for(N=1;N<=molecs->NM;N++){
if(molecs->IPCELL[N] > 0){
L=molecs->IPSP[N];
TOTENI=TOTEN;
TOTEN=TOTEN+gas->SP[6][L];
TOTEN=TOTEN+0.5e00*gas->SP[5][L]*(pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2)+pow(molecs->PV[3][N],2));
if(gas->ISPR[1][L] > 0) TOTEN=TOTEN+molecs->PROT[N];
if(gas->ISPV[L] > 0){
for(KV=1;KV<=gas->ISPV[L];KV++){
J=molecs->IPVIB[KV][N];
// IF (J <0) THEN
// J=-J
// IF (J == 99999) J=0
// END IF
TOTEN=TOTEN+double(J)*BOLTZ*gas->SPVM[1][KV][L];
}
}
}
if(gas->MELE > 1){
TOTEN=TOTEN+molecs->PELE[N];
TOTELE=TOTELE+molecs->PELE[N];
}
if((TOTEN-TOTENI) > 1.e-16) cout<<"MOL "<<N<<" ENERGY "<<TOTEN-TOTENI<<endl;
}
//
//WRITE (9,*) 'Total Energy =',TOTEN,NM
//WRITE (*,*) 'Total Energy =',TOTEN,NM
file_9<<"Total Energy = "<<setprecision(25)<<TOTEN<<"\t"<<molecs->NM<<endl;
cout<<"Total Energy = "<<setprecision(20)<<TOTEN<<"\t"<<molecs->NM<<endl;
// WRITE (*,*) 'Electronic Energy =',TOTELE
}
else{
N=I;
if(molecs->IPCELL[N] > 0){
L=molecs->IPSP[N];
TOTEN=TOTEN+gas->SP[6][L];
TOTEN=TOTEN+0.5e00*gas->SP[5][L]*(pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2)+pow(molecs->PV[3][N],2));
if(gas->ISPR[1][L] > 0) TOTEN=TOTEN+molecs->PROT[N];
if(gas->ISPV[L] > 0){
for(KV=1;KV<=gas->ISPV[L];KV++){
J=molecs->IPVIB[KV][N];
// IF (J <0) THEN
// J=-J
// IF (J == 99999) J=0
// END IF
TOTEN=TOTEN+double(J)*BOLTZ*gas->SPVM[1][KV][L];
}
}
}
}
//
return; //
}
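//--illustrative helper (not part of the original program): a hedged sketch of how the
//--diagnostic above can be used to check energy conservation across an interval of moves
//--and collisions. CHECK_ENERGY_DRIFT and the 1.e-12 relative tolerance are assumptions
//--introduced only for illustration.
static void CHECK_ENERGY_DRIFT(double TOTEN_BEFORE)
{
    double TOTEN_AFTER;
    ENERGY(0,TOTEN_AFTER);   //total energy of all molecules after the interval
    if(fabs(TOTEN_AFTER-TOTEN_BEFORE) > 1.e-12*fabs(TOTEN_BEFORE))
        cout<<"ENERGY DRIFT "<<TOTEN_AFTER-TOTEN_BEFORE<<endl;
}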
void SETXT()
{
//generate TECPLOT files for displaying an x-t diagram of an unsteady flow
//this employs ordered data, therefore the cells MUST NOT BE ADAPTED
//N.B. some custom coding for particular problems
//
//
//MOLECS molecs;
//CALC calc;
//GEOM_1D geom;
//GAS gas;
//OUTPUT output;
//
// IMPLICIT NONE
//
int N,M,IOUT;
double A,C;
double **VALINT;
// REAL(KIND=8), ALLOCATABLE, DIMENSION(:,:) :: VALINT
//
//VALINT(N,M) the interpolated values at sampling cell M boundaries and extrapolated values at boundaries
// N=1 distance
// N=2 time
// N=3 number density
// N=4 radial velocity
// N=5 pressure (nkT)
// N=6 temperature
// N=7 h2o fraction (Sec. 7.9 only)
//
//the variables in VALINT may be altered for particular problems
//
VALINT = new double*[7];
for(int i =0; i< 7; ++i)
VALINT[i] = new double[geom->NCELLS+2];
// ALLOCATE (VALINT(6,NCELLS+1),STAT=ERROR)
//
//777 FORMAT(12G14.6)
//Internal options
IOUT=0; //0 for dimensioned output, 1 for non-dimensional output
//
A=1.e00; //dt/dt for selection of v velocity component in TECPLOT to draw particle paths as "streamlines"
//
if(calc->FTIME < 0.5e00*calc->DTM){
//Headings and zero time record
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR VALINT ARRAY',ERROR
// ENDIF
calc->NLINE=1;
file_9<< "J in tecplot file = "<<calc->NLINE*(geom->NCELLS+1)<<endl;
// WRITE (18,*) 'VARIABLES = "Distance","Time","n","u","p","T","H2O","A"' //for combustion wave output(Sec. 7.9)
file_18<<"VARIABLES = 'Distance','Time','n','u','p','T','A' "<<endl;
file_18<<"ZONE I= "<<geom->NCELLS+1<<", J= (set to number of output intervals+1), F=POINT"<<endl;
//
for(N=1;N<=geom->NCELLS+1;N++){
            VALINT[1][N]=geom->XB[1]+(N-1)*geom->DDIV; //distance
            VALINT[2][N]=0.0;                          //time
VALINT[3][N]=gas->FND[1];
VALINT[4][N]=0;
VALINT[5][N]=gas->FND[1]*BOLTZ*gas->FTMP[1];
VALINT[6][N]=gas->FTMP[1];
// VALINT(7,N)=FSP(6,1) //FSP(6 for combustion wave
if((VALINT[1][N] > geom->XS) && (calc->ISECS == 1)){
VALINT[3][N]=gas->FND[2];
VALINT[5][N]=gas->FND[2]*BOLTZ*gas->FTMP[2];
VALINT[6][N]=gas->FTMP[2];
// VALINT(7,N)=FSP(6,2)
}
if(IOUT == 1){
VALINT[3][N]=1.e00;
VALINT[5][N]=1.e00;
VALINT[6][N]=1.e00;
}
for(M=1;M<=6;M++)
file_18<<VALINT[M][N]<<"\t";//WRITE (18,777) (VALINT(M,N),M=1,6),A
file_18<<A<<endl;
}
}
else{
calc->NLINE=calc->NLINE+1;
cout<<"J in tecplot file = "<<calc->NLINE<<endl;
if(geom->IVB == 0) C=geom->DDIV;
if(geom->IVB == 1) C=(geom->XB[2]+geom->VELOB*calc->FTIME-geom->XB[1])/double(geom->NDIV);
for(N=1;N<=geom->NCELLS+1;N++){
VALINT[1][N]=geom->XB[1]+(N-1)*C;
VALINT[2][N]=calc->FTIME;
if((N > 1) && (N < geom->NCELLS+1)){
VALINT[3][N]=0.5e00*(output->VAR[3][N]+output->VAR[3][N-1]);
VALINT[4][N]=0.5e00*(output->VAR[5][N]+output->VAR[5][N-1]);
VALINT[5][N]=0.5e00*(output->VAR[18][N]+output->VAR[18][N-1]);
VALINT[6][N]=0.5e00*(output->VAR[11][N]+output->VAR[11][N-1]);
// VALINT(7,N)=0.5D00*(VARSP(1,N,6)+VARSP(1,N-1,6)) //H2O fraction for Sec 7.9
}
}
for(N=3;N<=6;N++)
VALINT[N][1]=0.5e00*(3.e00*VALINT[N][2]-VALINT[N][3]);
//
for(N=3;N<=6;N++)
VALINT[N][geom->NCELLS+1]=0.5e00*(3.e00*VALINT[N][geom->NCELLS]-VALINT[N][geom->NCELLS-1]);
//
for(N=1;N<=geom->NCELLS+1;N++){
if(IOUT == 1){
VALINT[1][N]=(VALINT[1][N]-geom->XB[1])/(geom->XB[2]-geom->XB[1]);
VALINT[2][N]=VALINT[2][N]/calc->TNORM;
VALINT[3][N]=VALINT[3][N]/gas->FND[1];
VALINT[4][N]=VALINT[4][N]/gas->VMPM;
VALINT[5][N]=VALINT[5][N]/(gas->FND[1]*BOLTZ*gas->FTMP[1]);
VALINT[6][N]=VALINT[6][N]/gas->FTMP[1];
}
for(M=1;M<=6;M++)
file_18<<VALINT[M][N]<<"\t";//WRITE (18,777) (VALINT[M][N],M=1,6),A //
file_18<<A<<endl;
}
}
//
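    //free the interpolation buffer allocated at the start of this routine (a cleanup
    //addition; the original never releases VALINT, so it leaks on every call)
    for(int i=0;i<7;i++)
        delete [] VALINT[i];
    delete [] VALINT;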
return;
}
void MOLECULES_MOVE_1D()
{//
//molecule moves appropriate to the time step
//for homogeneous and one-dimensional flows
//(homogeneous flows are calculated as one-dimensional)
//MOLECS molecs;
//GAS gas;
//GEOM_1D geom;
//CALC calc;
//OUTPUT output;
//
// IMPLICIT NONE
//
int N,L,M,K,NCI,J,II,JJ;
double A,B,X,XI,XC,DX,DY,DZ,DTIM,S1,XM,R,TI,DTC,POB,UR,WFI,WFR,WFRI;
//
//N working integer
//NCI initial cell time
//DTIM time interval for the move
//POB position of the outer boundary
//TI initial time
//DTC time interval to collision with surface
//UR radial velocity component
//WFI initial weighting factor
//WFR weighting factor radius
//WFRI initial weighting factor radius
//
if((geom->ITYPE[2] == 4) && (calc->ICN == 1)){
//memset(calc->ALOSS,0.e00,sizeof(*calc->ALOSS));//calc->ALOSS=0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->ALOSS[i]=0.e00;
calc->NMP=molecs->NM;
}
//
N=1;
while(N <= molecs->NM){
//
NCI=molecs->IPCELL[N];
if((calc->IMTS == 0) || (calc->IMTS == 2)) DTIM=calc->DTM;
if(calc->IMTS == 1) DTIM=2.e00*geom->CCELL[3][NCI];
if(calc->FTIME-molecs->PTIM[N] > 0.5*DTIM){
WFI=1.e00;
if(geom->IWF == 1) WFI=1.e00+geom->WFM*pow(molecs->PX[1][N],geom->IFX);
II=0; //becomes 1 if a molecule is removed
TI=molecs->PTIM[N];
molecs->PTIM[N]=TI+DTIM;
calc->TOTMOV=calc->TOTMOV+1;
//
XI=molecs->PX[1][N];
DX=DTIM*molecs->PV[1][N];
X=XI+DX;
//
if(geom->IFX > 0){
DY=0.e00;
DZ=DTIM*molecs->PV[3][N];
if(geom->IFX == 2) DY=DTIM*molecs->PV[2][N];
R=sqrt(X*X+DY*DY+DZ*DZ);
}
//
if(geom->IFX == 0){
for(J=1;J<=2;J++){ // 1 for minimum x boundary, 2 for maximum x boundary
if(II == 0){
if(((J == 1) && (X < geom->XB[1])) || ((J == 2) && (X > (geom->XB[2]+geom->VELOB*molecs->PTIM[N])))){ //molecule crosses a boundary
if((geom->ITYPE[J] == 0) || (geom->ITYPE[J] == 3) || (geom->ITYPE[J] == 4)){
if(geom->XREM > geom->XB[1]){
L=molecs->IPSP[N];
calc->ENTMASS=calc->ENTMASS-gas->SP[5][L];
}
if((geom->ITYPE[2] == 4) && (calc->ICN == 1)){
L=molecs->IPSP[N];
calc->ALOSS[L]=calc->ALOSS[L]+1.e00;
}
REMOVE_MOL(N);
N=N-1;
II=1;
}
//
if(geom->ITYPE[J] == 1){
if((geom->IVB == 0) || (J == 1)){
X=2.e00*geom->XB[J]-X;
molecs->PV[1][N]=-molecs->PV[1][N];
}
else if((J == 2) && (geom->IVB == 1)){
DTC=(geom->XB[2]+TI*geom->VELOB-XI)/(molecs->PV[1][N]-geom->VELOB);
XC=XI+molecs->PV[1][N]*DTC;
molecs->PV[1][N]=-molecs->PV[1][N]+2.*geom->VELOB;
X=XC+molecs->PV[1][N]*(DTIM-DTC);
}
}
//
if(geom->ITYPE[J] == 2)
REFLECT_1D(N,J,X);
// END IF
}
}
}
}
else{ //cylindrical or spherical flow
//check boundaries
if((X <geom-> XB[1]) && (geom->XB[1] > 0.e00)){
RBC(XI,DX,DY,DZ,geom->XB[1],S1);
if(S1 < 1.e00){ //intersection with inner boundary
if(geom->ITYPE[1] == 2){//solid surface
DX=S1*DX;
DY=S1*DY;
DZ=S1*DZ;
AIFX(XI,DX,DY,DZ,X,molecs->PV[1][N],molecs->PV[2][N],molecs->PV[3][N]);
REFLECT_1D(N,1,X);
}
else{
REMOVE_MOL(N);
N=N-1;
II=1;
}
}
}
else if((geom->IVB == 0) && (R > geom->XB[2])){
RBC(XI,DX,DY,DZ,geom->XB[2],S1);
if(S1 < 1.e00){ //intersection with outer boundary
if(geom->ITYPE[2] == 2){ //solid surface
DX=S1*DX;
DY=S1*DY;
DZ=S1*DZ;
AIFX(XI,DX,DY,DZ,X,molecs->PV[1][N],molecs->PV[2][N],molecs->PV[3][N]);
X=1.001e00*geom->XB[2];
while(X > geom->XB[2])
REFLECT_1D(N,2,X);
// END DO
}
else{
REMOVE_MOL(N);
N=N-1;
II=1;
}
}
}
else if((geom->IVB == 1) && (R > (geom->XB[2]+molecs->PTIM[N]*geom->VELOB))){
if(geom->IFX == 1) UR=sqrt(pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2));
if(geom->IFX == 2) UR=sqrt(pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2)+pow(molecs->PV[3][N],2));
DTC=(geom->XB[2]+TI*geom->VELOB-XI)/(UR-geom->VELOB);
S1=DTC/DTIM;
DX=S1*DX;
DY=S1*DY;
DZ=S1*DZ;
AIFX(XI,DX,DY,DZ,X,molecs->PV[1][N],molecs->PV[2][N],molecs->PV[3][N]);
molecs->PV[1][N]=-molecs->PV[1][N]+2.0*geom->VELOB;
X=X+molecs->PV[1][N]*(DTIM-DTC);
}
else
AIFX(XI,DX,DY,DZ,X,molecs->PV[1][N],molecs->PV[2][N],molecs->PV[3][N]);
//DIAGNOSTIC
if(II == 0){
if(X > geom->XB[2]+molecs->PTIM[N]*geom->VELOB){
//WRITE (*,*) N,calc->FTIME,X,geom->XB[2]+molecs->PTIM[N]*geom->VELOB;
cout<<N<<" "<<calc->FTIME<<" "<<X<<" "<<(geom->XB[2]+molecs->PTIM[N]*geom->VELOB)<<endl;
}
}
//Take action on weighting factors
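                //(radial weighting: WFR<1 for outward motion, so the molecule survives with probability WFR;
                // WFR>1 for inward motion, so it is duplicated such that the expected number of copies equals WFR)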
if((geom->IWF == 1) && (II == 0)){
WFR=WFI/(1.e00+geom->WFM*pow(X,geom->IFX));
L=0;
WFRI=WFR;
if(WFR >= 1.e00){
while(WFR >= 1.e00){
L=L+1;
WFR=WFR-1.e00;
}
}
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF <= WFR) L=L+1;
if(L == 0){
REMOVE_MOL(N);
N=N-1;
II=1;
}
L=L-1;
if(L > 0){
for(K=1;K<=L;K++){
if(molecs->NM >= molecs->MNM) EXTEND_MNM(1.1);
molecs->NM=molecs->NM+1;
molecs->PX[1][molecs->NM]=X;
for(M=1;M<=3;M++)
molecs->PV[M][molecs->NM]=molecs->PV[M][N];
if(gas->MMRM > 0) molecs->PROT[molecs->NM]=molecs->PROT[N];
molecs->IPCELL[molecs->NM]=fabs(molecs->IPCELL[N]);
molecs->IPSP[molecs->NM]=molecs->IPSP[N];
molecs->IPCP[molecs->NM]=molecs->IPCP[N];
if(gas->MMVM > 0){
for(M=1;M<=gas->MMVM;M++)
molecs->IPVIB[M][molecs->NM]=molecs->IPVIB[M][N];
}
molecs->PTIM[molecs->NM]=molecs->PTIM[N]; //+5.D00*DFLOAT(K)*DTM
//note the possibility of a variable time advance that may take the place of the duplication buffer in earlier programs
if(molecs->PX[1][molecs->NM] > geom->XB[2]+molecs->PTIM[molecs->NM]*geom->VELOB)
//WRITE (*,*) 'DUP',NM,FTIME,PX(1,NM),XB(2)+PTIM(NM)*VELOB
cout<<"DUP "<<molecs->NM<<" "<<calc->FTIME<<" "<<molecs->PX[1][molecs->NM]<<" "<<(geom->XB[2]+molecs->PTIM[molecs->NM]*geom->VELOB)<<endl;
}
}
}
}
//
if(II == 0){
molecs->PX[1][N]=X;
            //(the Fortran CONTINUE here is a no-op; a C++ "continue" would skip the cell re-indexing below)
            if(!(molecs->PX[1][N] > geom->XB[1] && (molecs->PX[1][N] < geom->XB[2]))){
                cout<< N<<" OUTSIDE FLOWFIELD AT "<<molecs->PX[1][N]<<" VEL "<<molecs->PV[1][N]<<endl;
                REMOVE_MOL(N);
                N=N-1;
                II=1;
            }
}
//
if(II == 0){
if(geom->IVB == 0) FIND_CELL_1D(molecs->PX[1][N],molecs->IPCELL[N],JJ);
if(geom->IVB == 1) FIND_CELL_MB_1D(molecs->PX[1][N],molecs->IPCELL[N],JJ,molecs->PTIM[N]);
}
//
}
//
N=N+1;
}
//
return;
}
void READ_RESTART()
{
//MOLECS molecs;
//GEOM_1D geom;
//GAS gas;
//CALC calc;
//OUTPUT output;
// IMPLICIT NONE
//
fstream file_7;
int ZCHECK;
//
// 101 CONTINUE
_101:
file_7.open("PARAMETERS.DAT", ios::in | ios::binary);
if(file_7.is_open()){
cout<<"PARAMETERS.DAT opened successfully"<<endl;
file_7>>geom->NCCELLS>>geom->NCELLS>>gas->MMRM>>gas->MMVM>>molecs->MNM>>gas->MNSR>>gas->MSP>>geom->ILEVEL>>geom->MDIV>>gas->MMEX>>gas->MEX>>gas->MELE>>gas->MVIBL>>calc->NCLASS;
file_7.close();
}
else{
cout<<"PARAMETERS.DAT not opening"<<endl;
goto _101;
}
//cout<<geom->NCCELLS<<endl<<geom->NCELLS<<endl<<gas->MMRM<<endl<<gas->MMVM<<endl<<molecs->MNM<<endl;
// OPEN (7,FILE='PARAMETERS.DAT',FORM='BINARY',ERR=101)
// READ (7) NCCELLS,NCELLS,MMRM,MMVM,MNM,MNSR,MSP,ILEVEL,MDIV,MMEX,MEX,MELE,MVIBL,NCLASS
// CLOSE(7)
//
if(gas->MMVM > 0){
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->d_allocate(molecs->MNM+1,molecs->PROT);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->i_allocate(gas->MMVM+1,molecs->MNM+1,molecs->IPVIB);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),PROT(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM), &
// IPVIB(MMVM,MNM),PELE(MNM),STAT=ERROR)
}
else{
if(gas->MMRM > 0){
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->d_allocate(molecs->MNM+1,molecs->PROT);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),PROT(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM),PELE(MNM),STAT=ERROR)
}
else{
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM),PELE(MNM),STAT=ERROR)
}
}
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR MOLECULE ARRAYS',ERROR
// ENDIF
//
geom->i_allocate(geom->ILEVEL+1,geom->MDIV+1,geom->JDIV);
// ALLOCATE (JDIV(0:ILEVEL,MDIV),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR JDIV ARRAY',ERROR
// ENDIF
geom->d_allocate(5,geom->NCELLS+1,geom->CELL);
geom->i_allocate(geom->NCELLS+1,geom->ICELL);
geom->d_allocate(6,geom->NCCELLS+1,geom->CCELL);
geom->i_allocate(4,geom->NCCELLS+1,geom->ICCELL);
// ALLOCATE (CELL(4,NCELLS),ICELL(NCELLS),CCELL(5,NCCELLS),ICCELL(3,NCCELLS),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR CELL ARRAYS',ERROR
// ENDIF
output->d_allocate(geom->NCELLS+1,output->COLLS);
output->d_allocate(geom->NCELLS+1,output->WCOLLS);
output->d_allocate(geom->NCELLS+1,output->CLSEP);
output->d_allocate(gas->MNSR+1,output->SREAC);
output->d_allocate(24,geom->NCELLS+1,output->VAR);
output->d_allocate(13,geom->NCELLS+1,gas->MSP+1,output->VARSP);
output->d_allocate(36+gas->MSP,3,output->VARS);
output->d_allocate(10+gas->MSP,geom->NCELLS+1,gas->MSP+1,output->CS);
output->d_allocate(9,3,gas->MSP+1,3,output->CSS);
output->d_allocate(7,3,output->CSSS);
// ALLOCATE (COLLS(NCELLS),WCOLLS(NCELLS),CLSEP(NCELLS),SREAC(MNSR),VAR(23,NCELLS), &
// VARSP(0:12,NCELLS,MSP),VARS(0:35+MSP,2),CS(0:9+MSP,NCELLS,MSP),CSS(0:8,2,MSP,2),CSSS(6,2),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR SAMPLING ARRAYS',ERROR
// ENDIF
//
if(gas->MMVM >= 0){
output->d_allocate(gas->MSP+1,gas->MMVM+1,151,output->VIBFRAC);
output->d_allocate(gas->MSP+1,gas->MMVM+1,output->SUMVIB);
// ALLOCATE (VIBFRAC(MSP,MMVM,0:150),SUMVIB(MSP,MMVM),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR RECOMBINATION ARRAYS',ERROR
// ENDIF
}
//
ALLOCATE_GAS();
//
//102 CONTINU
_102:
file_7.open("RESTART.DAT", ios::in | ios::binary);
if(file_7.is_open()){
cout<<"RESTART.DAT opened successfully"<<endl;
/*file_7>>calc->AJM>>calc->ALOSS>>output->AVDTM>>BOLTZ>>geom->CCELL>>geom->CELL>>output->CLSEP>>output->COLLS>>calc->CPDTM>>gas->CR>>output->CS>>output->CSS>>output->CSSS>>gas->CTM>>gas->CXSS>>geom->DDIV>>DPI>>calc->DTM>>calc->DTSAMP>>calc->DTOUT>>calc->EME>>calc->ENTMASS>>gas->ENTR>>calc->ENTREM>>calc->ERROR>>gas->ERS>>gas->FDEN>>gas->FMA>>gas->FND>>calc->FNUM>>calc->FRACSAM>>gas->FSP>>gas->FP>>gas->FPM>>gas->FPR>>geom->FREM>>gas->FSPEC>>gas->FTMP>>calc->FTIME>>gas->FVTMP>>geom->ICCELL>>geom->ICELL>>calc->ICLASS>>calc->ICN>>molecs->ICREF>>geom->IFX>>gas->IGAS>>calc->IMTS>>molecs->IPCELL>>molecs->IPCP>>molecs->IPSP>>molecs->IPVIB>>calc->IREM>>calc->ISAD>>calc->ISECS>>calc->ISF>>gas->ISPEX>>gas->ISPR>>gas->ISPRC>>gas->ISPRK>>gas->ISPV>>gas->ISPVM>>gas->ISRCD>>geom->ITYPE>>geom->IVB>>geom->IWF>>geom->JDIV>>gas->LIS>>gas->LRS>>calc->MOLSC>>calc->MVER>>geom->NCCELLS>>geom->NCELLS>>geom->NCIS>>geom->NDIV>>gas->NELL>>gas->NEX>>calc->NLINE>>molecs->NM>>output->NMISAMP>>calc->NNC>>output->NOUT>>output->NSAMP>>gas->NSLEV>>gas->NSPEX>>calc->NREL>>calc->NVER>>molecs->PELE>>PI>>molecs->PROT>>molecs->PTIM>>molecs->PV>>molecs->PX>>gas->QELC>>gas->RGFS>>gas->RMAS>>gas->SLER>>gas->SP>>gas->SPEX>>SPI>>gas->SPM>>gas->SPR>>gas->SPRC>>gas->SPREX>>gas->SPRP>>gas->SPRT>>gas->SPV>>gas->SPVM>>output->SREAC>>output->SUMVIB>>calc->TCOL>>calc->TDISS>>calc->TRECOMB>>output->TISAMP>>calc->TPOUT>>calc->TREF>>calc->TLIM>>calc->TOTCOL>>calc->TOTMOV>>gas->TREACG>>gas->TREACL>>calc->TOUT>>calc->TPDTM>>calc->TREF>>calc->TSAMP>>gas->TSURF>>output->VAR>>output->VARS>>output->VARSP>>geom->VELOB>>gas->VFX>>gas->VFY>>output->VIBFRAC>>gas->VMP>>gas->VMPM>>calc->VNMAX>>gas->VSURF>>output->WCOLLS>>geom->WFM>>geom->XB>>geom->XREM>>output->XVELS>>output->YVELS>>gas->TNEX>>ZCHECK>>endl;*/
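        //note: the raw reads below restore only the top-level pointers/objects, not the arrays
        //      they point to; a complete restart would need to stream the individual fields,
        //      as in the commented-out Fortran READ list that follows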
file_7.read((char*)&calc,sizeof(calc));
file_7.read((char*)&molecs,sizeof(molecs));
file_7.read((char*)&gas,sizeof(gas));
file_7.read((char*)&geom,sizeof(geom));
file_7.read((char*)&output,sizeof(output));
file_7.close();
}
else{
cout<<"Restart.DAT not opening"<<endl;
goto _102;
}
// OPEN (7,FILE='RESTART.DAT',FORM='BINARY',ERR=102)
// READ (7) AJM,ALOSS,AVDTM,BOLTZ,CCELL,CELL,CLSEP,COLLS, &
// CPDTM,CR,CS,CSS,CSSS,CTM,CXSS,DDIV,DPI,DTM,DTSAMP,DTOUT,EME, &
// ENTMASS,ENTR,ENTREM,ERROR,ERS,FDEN,FMA,FND,FNUM,FRACSAM,FSP,FP,FPM,FPR,FREM,FSPEC, &
// FTMP,FTIME,FVTMP,ICCELL,ICELL,ICLASS,ICN,ICREF,IFX,IGAS,IMTS,IPCELL,IPCP, &
// IPSP,IPVIB,IREM,ISAD,ISECS,ISF,ISPEX,ISPR,ISPRC,ISPRK,ISPV,ISPVM,ISRCD,ITYPE,IVB,IWF, &
// JDIV,LIS,LRS,MOLSC,MVER,NCCELLS,NCELLS, &
// NCIS,NDIV,NELL,NEX,NLINE,NM,NMISAMP,NNC,NOUT,NSAMP,NSLEV,NSPEX,NREL,NVER,PELE,PI,PROT,PTIM,PV,PX, &
// QELC,RGFS,RMAS,SLER,SP,SPEX,SPI,SPM,SPR,SPRC,SPREX,SPRP,SPRT,SPV,SPVM,SREAC,SUMVIB, &
// TCOL,TDISS,TRECOMB,TISAMP,TPOUT,TREF,TLIM,TOTCOL,TOTMOV, &
// TREACG,TREACL,TOUT,TPDTM,TREF,TSAMP,TSURF,VAR,VARS,VARSP,VELOB,VFX,VFY,VIBFRAC,VMP, &
// VMPM,VNMAX,VSURF,WCOLLS,WFM,XB,XREM,XVELS,YVELS,TNEX,ZCHECK
// //
// CLOSE(7)
//
if(ZCHECK != 1234567){
file_9<<molecs->NM<<" Molecules, Check integer = "<<ZCHECK<<endl;
//WRITE (9,*) NM,' Molecules, Check integer =',ZCHECK
return ;
}
else
file_9<<"Restart file read, Check integer= "<<ZCHECK<<endl;
//WRITE (9,*) 'Restart file read, Check integer=',ZCHECK
//
return;
//
}
//*****************************************************************************
void WRITE_RESTART()
{
//MOLECS molecs;
//GEOM_1D geom;
//GAS gas;
//CALC calc;
//OUTPUT output;
// IMPLICIT NONE
//
int ZCHECK;
//
fstream file_7;
ZCHECK=1234567;
//
//101 CONTINUE
_101:
file_7.open("PARAMETERS.DAT", ios::out | ios::binary);
if(file_7.is_open()){
file_7<<geom->NCCELLS<<endl<<geom->NCELLS<<endl<<gas->MMRM<<endl<<gas->MMVM<<endl<<molecs->MNM<<endl<<gas->MNSR<<endl<<gas->MSP<<endl<<geom->ILEVEL<<endl<<geom->MDIV<<endl<<gas->MMEX<<endl<<gas->MEX<<endl<<gas->MELE<<endl<<gas->MVIBL<<endl<<calc->NCLASS<<endl;
file_7.close();
}
else{
cout<<"Parameters.DAT file not opening(write)"<<endl;
goto _101;
}
// OPEN (7,FILE='PARAMETERS.DAT',FORM='BINARY',ERR=101)
// WRITE (7) NCCELLS,NCELLS,MMRM,MMVM,MNM,MNSR,MSP,ILEVEL,MDIV,MMEX,MEX,MELE,MVIBL,NCLASS
// CLOSE(7)
//
// 102 CONTINUE
_102:
file_7.open("RESTART.DAT", ios::out | ios::binary);
if(file_7.is_open()){
/*file_7<<calc->AJM<<calc->ALOSS<<output->AVDTM<<BOLTZ<<geom->CCELL<<geom->CELL<<output->CLSEP<<output->COLLS<<calc->CPDTM<<gas->CR<<output->CS<<output->CSS<<output->CSSS<<gas->CTM<<gas->CXSS<<geom->DDIV<<DPI<<calc->DTM<<calc->DTSAMP<<calc->DTOUT<<calc->EME<<calc->ENTMASS<<gas->ENTR<<calc->ENTREM<<calc->ERROR<<gas->ERS<<gas->FDEN<<gas->FMA<<gas->FND<<calc->FNUM<<calc->FRACSAM<<gas->FSP<<gas->FP<<gas->FPM<<gas->FPR<<geom->FREM<<gas->FSPEC<<gas->FTMP<<calc->FTIME<<gas->FVTMP<<geom->ICCELL<<geom->ICELL<<calc->ICLASS<<calc->ICN<<molecs->ICREF<<geom->IFX<<gas->IGAS<<calc->IMTS<<molecs->IPCELL<<molecs->IPCP<<molecs->IPSP<<molecs->IPVIB<<calc->IREM<<calc->ISAD<<calc->ISECS<<calc->ISF<<gas->ISPEX<<gas->ISPR<<gas->ISPRC<<gas->ISPRK<<gas->ISPV<<gas->ISPVM<<gas->ISRCD<<geom->ITYPE<<geom->IVB<<geom->IWF<<geom->JDIV<<gas->LIS<<gas->LRS<<calc->MOLSC<<calc->MVER<<geom->NCCELLS<<geom->NCELLS<<geom->NCIS<<geom->NDIV<<gas->NELL<<gas->NEX<<calc->NLINE<<molecs->NM<<output->NMISAMP<<calc->NNC<<output->NOUT<<output->NSAMP<<gas->NSLEV<<gas->NSPEX<<calc->NREL<<calc->NVER<<molecs->PELE<<PI<<molecs->PROT<<molecs->PTIM<<molecs->PV<<molecs->PX<<gas->QELC<<gas->RGFS<<gas->RMAS<<gas->SLER<<gas->SP<<gas->SPEX<<SPI<<gas->SPM<<gas->SPR<<gas->SPRC<<gas->SPREX<<gas->SPRP<<gas->SPRT<<gas->SPV<<gas->SPVM<<output->SREAC<<output->SUMVIB<<calc->TCOL<<calc->TDISS<<calc->TRECOMB<<output->TISAMP<<calc->TPOUT<<calc->TREF<<calc->TLIM<<calc->TOTCOL<<calc->TOTMOV<<gas->TREACG<<gas->TREACL<<calc->TOUT<<calc->TPDTM<<calc->TREF<<calc->TSAMP<<gas->TSURF<<output->VAR<<output->VARS<<output->VARSP<<geom->VELOB<<gas->VFX<<gas->VFY<<output->VIBFRAC<<gas->VMP<<gas->VMPM<<calc->VNMAX<<gas->VSURF<<output->WCOLLS<<geom->WFM<<geom->XB<<geom->XREM<<output->XVELS<<output->YVELS<<gas->TNEX<<ZCHECK<<endl;*/
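        //note: as in READ_RESTART, the raw writes below dump only the top-level pointers/objects,
        //      not the arrays they point to; the commented-out Fortran WRITE list below shows the
        //      data a complete restart file would need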
file_7.write((char*)&calc,sizeof(calc));
file_7.write((char*)&molecs,sizeof(molecs));
file_7.write((char*)&gas,sizeof(gas));
file_7.write((char*)&geom,sizeof(geom));
file_7.write((char*)&output,sizeof(output));
file_7.close();
}
else{
cout<<"Restart.DAT file not opening(write)"<<endl;
        goto _102;
}
// OPEN (7,FILE='RESTART.DAT',FORM='BINARY',ERR=102)
// WRITE (7)AJM,ALOSS,AVDTM,BOLTZ,CCELL,CELL,CLSEP,COLLS, &
// CPDTM,CR,CS,CSS,CSSS,CTM,CXSS,DDIV,DPI,DTM,DTSAMP,DTOUT,EME, &
// ENTMASS,ENTR,ENTREM,ERROR,ERS,FDEN,FMA,FND,FNUM,FRACSAM,FSP,FP,FPM,FPR,FREM,FSPEC, &
// FTMP,FTIME,FVTMP,ICCELL,ICELL,ICLASS,ICN,ICREF,IFX,IGAS,IMTS,IPCELL,IPCP, &
// IPSP,IPVIB,IREM,ISAD,ISECS,ISF,ISPEX,ISPR,ISPRC,ISPRK,ISPV,ISPVM,ISRCD,ITYPE,IVB,IWF, &
// JDIV,LIS,LRS,MOLSC,MVER,NCCELLS,NCELLS, &
// NCIS,NDIV,NELL,NEX,NLINE,NM,NMISAMP,NNC,NOUT,NSAMP,NSLEV,NSPEX,NREL,NVER,PELE,PI,PROT,PTIM,PV,PX, &
// QELC,RGFS,RMAS,SLER,SP,SPEX,SPI,SPM,SPR,SPRC,SPREX,SPRP,SPRT,SPV,SPVM,SREAC,SUMVIB, &
// TCOL,TDISS,TRECOMB,TISAMP,TPOUT,TREF,TLIM,TOTCOL,TOTMOV, &
// TREACG,TREACL,TOUT,TPDTM,TREF,TSAMP,TSURF,VAR,VARS,VARSP,VELOB,VFX,VFY,VIBFRAC,VMP, &
// VMPM,VNMAX,VSURF,WCOLLS,WFM,XB,XREM,XVELS,YVELS,TNEX,ZCHECK
// //
// CLOSE(7)
//
file_9<<"Restart files written"<<endl;
//WRITE (9,*) 'Restart files written'
//
return;
}
void OUTPUT_RESULTS()
{
//--calculate the surface and flowfield properties
//--generate TECPLOT files for displaying these properties
    //--calculate collision rates and flow transit times and reset time intervals
//--add molecules to any flow plane molecule output files
//CALC calc;
//MOLECS molecs;
//GAS gas;
//OUTPUT output;
//GEOM_1D geom;
fstream file_3;
fstream file_10;
fstream file_7;
int IJ,J,JJ,K,L,LL,M,N,NN,NMCR,CTIME,II;
long long NNN;
double AS,AT,C1,C2,C3,C4,C5,C6,C7,C8,C9;
double A,B,C,SDTM,SMCR,DOF,AVW,UU,VDOFM,TVIBM,VEL,DTMI,TT;
//dout
double SUM[14];
double SUMS[10][3];
double *TVIB,*VDOF,*PPA,*TEL,*ELDOF,*SDOF,*CDTM;
double **TV,**THCOL;
double ***DF;
int *NMS;
// REAL(KIND=8), ALLOCATABLE, DIMENSION(:) :: TVIB,VDOF,PPA,TEL,ELDOF,SDOF,CDTM
// REAL(KIND=8), ALLOCATABLE, DIMENSION(:,:) :: TV,THCOL
// REAL(KIND=8), ALLOCATABLE, DIMENSION(:,:,:) :: DF
// INTEGER, ALLOCATABLE, DIMENSION(:) :: NMS
//INTEGER, ALLOCATABLE, DIMENSION(:,:) ::
string F,E;
//--CTIME computer time (microseconds)
//--SUMS(N,L) sum over species of CSS(N,J,L,M) for surface properties
//
//--For flowfield properties,where <> indicates sampled sum
//--SUM(0) the molecular number sum over all species
//--SUM(1) the weighted number sum over all species
//--SUM(2) the weighted sum of molecular masses
//--SUM(3),(4),(5) the weighted sum over species of m*<u>,<v>,<w>
//--SUM(6) the weighted sum over species of m*(<u**2>+<v**2>+<w**2>)
//--SUM(7) the weighted sum over species of <u**2>+<v**2>+<w**2>
//--SUM(8) the weighted sum of rotational energy
//--SUM(9) the weighted sum of rotational degrees of freedom
//--SUM(10) the weighted sum over species of m*<u**2>
//--SUM(11) the weighted sum over species of m*<v**2>
//--SUM(12) sum over species of m*<w**2>
//--SUM(13) the weighted sum of electronic energy
//--UU velocity squared
//--DOF degrees of freedom
//--AVW the average value of the viscosity-temperature exponent
//--DVEL velocity difference
//--TVEL thermal speed
//--SMCR sum of mcs/mfp over cells
//--NMCR number in the sum
//--VDOFM effective vibrational degrees of freedom of mixture
//--TVIB(L)
//--VDOF(L)
//--TV(K,L) the temperature of vibrational mode K of species L
//--PPA particles per atom
//--NMS number per species
//--SDOF(L) total degrees of freedom for species L
//
//
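//--for reference, the cell reductions below restate the equations already cited in the code:
//--  number density (Eqn. 4.28): n = SUM(1)*FNUM/(cell volume*NSAMP)
//--  stream velocity (Eqn. 4.30): u = SUM(3)/SUM(2)
//--  translational temperature (Eqn. 4.39): Ttr = (SUM(6)-SUM(2)*(u*u+v*v+w*w))/(3*BOLTZ*SUM(1))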
//--calculate the flowfield properties in the cells
//dout
TV = new double*[gas->MMVM+1];
for(int i =0; i< gas->MMVM+1; ++i)
TV[i] = new double[gas->MSP+1];
TVIB = new double[gas->MSP+1];
DF = new double **[geom->NCELLS+1];
for (int i = 0; i < geom->NCELLS+1; ++i)
{
DF[i] = new double *[gas->MMVM+1];
for (int j = 0; j < gas->MMVM+1; ++j)
DF[i][j] = new double [gas->MSP+1];
}
VDOF= new double[gas->MSP+1];
TEL = new double[gas->MSP+1];
ELDOF = new double[gas->MSP+1];
PPA = new double[gas->MSP+1];
NMS = new int[gas->MSP+1];
THCOL = new double*[gas->MSP+1];
for(int i =0; i< gas->MSP+1; ++i)
THCOL[i] = new double[gas->MSP+1];
SDOF = new double[gas->MSP+1];
CDTM = new double[geom->NCELLS+1];
// ALLOCATE (TV(MMVM,MSP),TVIB(MSP),DF(NCELLS,MMVM,MSP),VDOF(MSP),TEL(MSP),ELDOF(MSP),PPA(MSP),NMS(MSP),THCOL(MSP,MSP) &
// ,SDOF(MSP),CDTM(NCELLS),STAT=ERROR)
// if(calc->ERROR!=0)
// {
// cout<<"ROGRAM COULD NOT ALLOCATE OUTPUT VARIABLES"<<calc->ERROR<<endl;
// }
if(calc->FTIME>0.5e00*calc->DTM)
{
output->NOUT+=1;
if(output->NOUT>9999)
output->NOUT=output->NOUT-9999;
cout<<"Generating files for output interval"<<output->NOUT<<endl;
if(calc->ISF==0)
{
//dout
//OPEN (3,FILE='DS1OUT.DAT')
file_3.open("DS1OUT.DAT" , ios::out);
if(file_3.is_open()){
cout<<"DS1OUT.DAT is opened"<<endl;
}
else{
cout<<"DS1OUT.DAT not opened"<<endl;
}
//F='DS';//E//'.OUT'
}
else
{
//--the files are DS1n.DAT, where n is a four digit integer equal to NOUT
//dout
//500 FORMAT(I5)
//ENCODE(5,500,E) 10000+NOUT
int a=output->NOUT+10000;
E=to_string(a);
F="DS" + E + "OUT.DAT";
//dout
            file_3.open(F.c_str(), ios::out); //open the interval file; the call had been commented out, so the is_open test below could never succeed
if(file_3.is_open()){
cout<<F<<" is opened"<<endl;
}
else{
cout<<F<<" not opened"<<endl;
}
//OPEN (3,FILE=F)
}
}
//dout
//memset(output->VAR,0.e00,sizeof(**output->VAR));
for(int i=0;i<24;i++){
for(int j=0;j<geom->NCELLS+1;j++)
output->VAR[i][j]=0.e00;
}
if(geom->IFX==0)
A=calc->FNUM/(calc->FTIME-output->TISAMP);
for(JJ=1;JJ<=2;JJ++)
{
        if(geom->IFX==1)
            A=calc->FNUM/(2.e00*PI*geom->XB[JJ]*(calc->FTIME-output->TISAMP)); //--per unit area per unit sample time
        if(geom->IFX==2)
            A=calc->FNUM/(4.e00*PI*geom->XB[JJ]*geom->XB[JJ]*(calc->FTIME-output->TISAMP));
//--JJ=1 for surface at XB(1), JJ=2 for surface at XB(2)
if(geom->ITYPE[JJ]==2)
{
//dout
//memset(SUMS,0.e00,sizeof(SUMS));
for(int i=0;i<10;i++){
for(int j=0;j<3;j++)
SUMS[i][j]=0.e00;
}
for( L=1;L<=gas->MSP;L++)
{
for(J=0;J<=8;J++)
{
for(IJ=1;IJ<=2;IJ++)
{
SUMS[J][IJ]=SUMS[J][IJ]+output->CSS[J][JJ][L][IJ];
}
}
}
output->VARS[0][JJ]=SUMS[0][1];
output->VARS[1][JJ]=SUMS[1][1];
output->VARS[2][JJ]=SUMS[1][2];
output->VARS[3][JJ]=SUMS[1][1]*A;
output->VARS[4][JJ]=SUMS[1][2]*A;
output->VARS[5][JJ]=SUMS[2][1]*A;
output->VARS[6][JJ]=SUMS[2][2]*A;
output->VARS[7][JJ]=SUMS[3][1]*A;
output->VARS[8][JJ]=SUMS[3][2]*A;
output->VARS[9][JJ]=SUMS[4][1]*A;
output->VARS[10][JJ]=SUMS[4][2]*A;
output->VARS[11][JJ]=SUMS[5][1]*A;
output->VARS[12][JJ]=SUMS[5][2]*A;
output->VARS[13][JJ]=SUMS[6][1]*A;
output->VARS[14][JJ]=SUMS[6][2]*A;
output->VARS[15][JJ]=SUMS[7][1]*A;
output->VARS[16][JJ]=SUMS[7][2]*A;
output->VARS[33][JJ]=SUMS[8][1]*A;
output->VARS[34][JJ]=SUMS[8][2]*A;
// VARS(17,JJ)=SUMS(9,1)*A //--SURFACE REACTIONS NOT YET IMPLEMENTED
// VARS(18,JJ)=SUMS(9,2)*A
if(output->CSSS[1][JJ]>1.e-6)
{
output->VARS[19][JJ]=output->CSSS[3][JJ]/output->CSSS[2][JJ]; ////--n.b. must be modified to include second component in 3D
output->VARS[20][JJ]=(output->CSSS[4][JJ]-output->CSSS[2][JJ]*output->VARS[19][JJ]*output->VARS[19][JJ])/(output->CSSS[1][JJ]*3.e00*BOLTZ)-gas->TSURF[JJ];
output->VARS[19][JJ]=output->VARS[19][JJ]-gas->VSURF[JJ];
if(output->CSSS[6][JJ]>1.e-6)
{
output->VARS[21][JJ]=(2.e000/BOLTZ)*(output->CSSS[5][JJ]/output->CSSS[6][JJ])-gas->TSURF[JJ];
}
else
{
output->VARS[21][JJ]=0.e00;
}
}
else
{
output->VARS[19][JJ]=0.e00;
output->VARS[20][JJ]=0.e00;
output->VARS[21][JJ]=0.e00;
}
output->VARS[22][JJ]=(SUMS[2][1]+SUMS[2][2])*A;
output->VARS[23][JJ]=(SUMS[3][1]+SUMS[3][2])*A;
output->VARS[24][JJ]=(SUMS[4][1]+SUMS[4][2])*A;
output->VARS[25][JJ]=(SUMS[5][1]+SUMS[5][2])*A;
output->VARS[26][JJ]=(SUMS[6][1]+SUMS[6][2])*A;
output->VARS[27][JJ]=(SUMS[7][1]+SUMS[7][2])*A;
output->VARS[28][JJ]=(SUMS[9][1]+SUMS[9][2])*A;
output->VARS[29][JJ]=output->VARS[11][JJ]+output->VARS[13][JJ]+output->VARS[15][JJ]+output->VARS[33][JJ];
output->VARS[30][JJ]=output->VARS[12][JJ]+output->VARS[14][JJ]+output->VARS[16][JJ]+output->VARS[34][JJ];
output->VARS[31][JJ]=output->VARS[29][JJ]+output->VARS[30][JJ];
output->VARS[35][JJ]=output->VARS[33][JJ]+output->VARS[34][JJ];
            for(L=1;L<=gas->MSP;L++)
{
if(SUMS[1][1]>0)
{
output->VARS[35+L][JJ]=100*output->CSS[1][JJ][L][1]/SUMS[1][1];
}
else
{
output->VARS[35+L][JJ]=0.0;
}
}
}
}
//output->VARSP=0;
for(int i=0;i<13;i++){
for(int j=0;j<geom->NCELLS+1;j++){
for(int k=0;k<gas->MSP+1;k++)
output->VARSP[i][j][k]=0;
}
}
SMCR=0;
NMCR=0;
for(N=1;N<=geom->NCELLS;N++)
{
if(N==120)
{
continue;
}
        A=calc->FNUM/(geom->CELL[4][N]*output->NSAMP); //--number-to-density normalisation: FNUM/(cell volume*number of samples)
if(geom->IVB==1)
A=A*pow((geom->XB[2]-geom->XB[1])/(geom->XB[2]+geom->VELOB*0.5e00*(calc->FTIME-output->TISAMP)-geom->XB[1]),geom->IFX+1);
//--check the above for non-zero XB(1)
//dout
//memset(SUM,0,sizeof(SUM));
for(int i=0;i<14;i++)
SUM[i]=0;
NMCR+=1;
for(L=1;L<=gas->MSP;L++)
{
SUM[0]=SUM[0]+output->CS[0][N][L];
SUM[1]=SUM[1]+output->CS[1][N][L];
SUM[2]=SUM[2]+gas->SP[5][L]*output->CS[0][N][L];
for(K=1;K<=3;K++)
{
SUM[K+2]=SUM[K+2]+gas->SP[5][L]*output->CS[K+1][N][L];
if(output->CS[1][N][L]>1.1e00)
{
output->VARSP[K+1][N][L]=output->CS[K+4][N][L]/output->CS[1][N][L];
//--VARSP(2,3,4 are temporarily the mean of the squares of the velocities
output->VARSP[K+8][N][L]=output->CS[K+1][N][L]/output->CS[1][N][L];
}
}
SUM[6]=SUM[6]+gas->SP[5][L]*(output->CS[5][N][L]+output->CS[6][N][L]+output->CS[7][N][L]);
SUM[10]=SUM[10]+gas->SP[5][L]*output->CS[5][N][L];
            SUM[11]=SUM[11]+gas->SP[5][L]*output->CS[6][N][L];
SUM[12]=SUM[12]+gas->SP[5][L]*output->CS[7][N][L];
SUM[13]=SUM[13]+output->CS[9][N][L];
if(output->CS[1][N][L]>0.5e00)
SUM[7]=SUM[7]+output->CS[5][N][L]+output->CS[6][N][L]+output->CS[7][N][L];
if(gas->ISPR[1][L]>0)
{
SUM[8]=SUM[8]+output->CS[8][N][L];
SUM[9]=SUM[9]+output->CS[1][N][L]*gas->ISPR[1][L];
}
}
AVW=0;
for(L=1;L<=gas->MSP;L++)
{
output->VARSP[0][N][L]=output->CS[1][N][L];
output->VARSP[1][N][L]=0.e00;
output->VARSP[6][N][L]=0.0;
output->VARSP[7][N][L]=0.0;
output->VARSP[8][N][L]=0.0;
if(SUM[1]>0.1)
{
output->VARSP[1][N][L]=output->CS[1][N][L]/SUM[1];
AVW=AVW+gas->SP[3][L]*output->CS[1][N][L]/SUM[1];
if(gas->ISPR[1][L]>0 && output->CS[1][N][L]>0.5)
output->VARSP[6][N][L]=(2.e00/BOLTZ)*output->CS[8][N][L]/((double)(gas->ISPR[1][L])*output->CS[1][N][L]);
}
output->VARSP[5][N][L]=0;
for(K=1;K<=3;K++)
{
output->VARSP[K+1][N][L]=(gas->SP[5][L]/BOLTZ)*(output->VARSP[K+1][N][L]-pow(output->VARSP[K+8][N][L],2));
output->VARSP[5][N][L]=output->VARSP[5][N][L]+output->VARSP[K+1][N][L];
}
output->VARSP[5][N][L]=output->VARSP[5][N][L]/3.e00;
output->VARSP[8][N][L]=(3.e00*output->VARSP[5][N][L]+(double)gas->ISPR[1][L]*output->VARSP[6][N][L])/(3.e00+(double)(gas->ISPR[1][L]));
}
if(geom->IVB==0)
output->VAR[1][N]=geom->CELL[1][N];
if(geom->IVB==1)
{
C=(geom->XB[2]+geom->VELOB*calc->FTIME-geom->XB[1])/(double)(geom->NDIV); //new DDIV
output->VAR[1][N]=geom->XB[1]+((double)(N-1)+0.5)*C;
}
output->VAR[2][N]=SUM[0];
if(SUM[1]>0.5)
{
output->VAR[3][N]=SUM[1]*A;//--number density Eqn. (4.28)
output->VAR[4][N]=output->VAR[3][N]*SUM[2]/SUM[1]; //--density Eqn. (4.29)
output->VAR[5][N]=SUM[3]/SUM[2];//--u velocity component Eqn. (4.30)
output->VAR[6][N]=SUM[4]/SUM[2]; //--v velocity component Eqn. (4.30)
output->VAR[7][N]=SUM[5]/SUM[2]; //--w velocity component Eqn. (4.30)
UU= pow(output->VAR[5][N],2)+pow(output->VAR[6][N],2)+pow(output->VAR[7][N],2);
if(SUM[1]>1)
{
output->VAR[8][N]=(fabs(SUM[6]-SUM[2]*UU))/(3.e00*BOLTZ*SUM[1]); //Eqn. (4.39)
//--translational temperature
output->VAR[19][N]=fabs(SUM[10]-SUM[2]*pow(output->VAR[5][N],2))/(BOLTZ*SUM[1]);
output->VAR[20][N]=fabs(SUM[11]-SUM[2]*pow(output->VAR[6][N],2))/(BOLTZ*SUM[1]);
output->VAR[21][N]=fabs(SUM[12]-SUM[2]*pow(output->VAR[7][N],2))/(BOLTZ*SUM[1]);
}
else
{
output->VAR[8][N]=1.0;
output->VAR[19][N]=1.0;
output->VAR[20][N]=1.0;
output->VAR[21][N]=1.0;
}
if(SUM[9]>0.1e00)
{
output->VAR[9][N]=(2.e00/BOLTZ)*SUM[8]/SUM[9]; ////--rotational temperature Eqn. (4.36)
}
else
output->VAR[9][N]=0.0;
output->VAR[10][N]=gas->FTMP[1]; ////vibration default
DOF=(3.e00+SUM[9])/SUM[1];
            output->VAR[11][N]=(3.0*output->VAR[8][N]+(SUM[9]/SUM[1])*output->VAR[9][N])/DOF;
//--overall temperature based on translation and rotation
output->VAR[18][N]=output->VAR[3][N]*BOLTZ*output->VAR[8][N];
//--scalar pressure (now (from V3) based on the translational temperature)
if(gas->MMVM>0)
{
for(L=1;L<=gas->MSP;L++)
{
VDOF[L]=0.0;
//dout
if(gas->ISPV[L] > 0)
{
for(K=1;K<=gas->ISPV[L];K++)
{
if(output->CS[K+9][N][L]<BOLTZ)
{
TV[K][L]=0.0;
DF[N][K][L]=0.0;
}
else
{
TV[K][L]=gas->SPVM[1][K][L]/log(1.0+output->CS[1][N][L]/output->CS[K+9][N][L]) ;//--Eqn.(4.45)
DF[N][K][L]=2.0*(output->CS[K+9][N][L]/output->CS[1][N][L])*log(1.0+output->CS[1][N][L]/output->CS[K+9][N][L]); //--Eqn. (4.46)
}
VDOF[L]=VDOF[L]+DF[N][K][L];
}
//memset(TVIB,0.0,sizeof(*TVIB));
for(int i=0;i<gas->MSP+1;i++)
TVIB[i]=0.0;
for(K=1;K<=gas->ISPV[L];K++)
{
if(VDOF[L]>1.e-6)
{
TVIB[L]=TVIB[L]+TV[K][L]*DF[N][K][L]/VDOF[L];
}
else
TVIB[L]=gas->FVTMP[1];
}
}
else
{
TVIB[L]=calc->TREF;
VDOF[L]=0.0;
}
output->VARSP[7][N][L]=TVIB[L];
}
VDOFM=0.0;
TVIBM=0.0;
A=0.e00;
for(L=1;L<=gas->MSP;L++)
{
//dout
if(gas->ISPV[L] > 0)
{
A=A+output->CS[1][N][L];
}
}
for(L=1;L<=gas->MSP;L++)
{
//dout
if(gas->ISPV[L] > 0)
{
                        VDOFM=VDOFM+VDOF[L]*output->CS[1][N][L]/A;
                        TVIBM=TVIBM+TVIB[L]*output->CS[1][N][L]/A;
}
}
output->VAR[10][N]=TVIBM;
}
for(L=1;L<=gas->MSP;L++)
{
if(output->VARSP[0][N][L]>0.5)
{
//--convert the species velocity components to diffusion velocities
for(K=1;K<=3;K++)
{
output->VARSP[K+8][N][L]=output->VARSP[K+8][N][L]-output->VAR[K+4][N];
}
if(gas->MELE>1)
{
//--calculate the electronic temperatures for the species
//memset(ELDOF,0.e00,sizeof(*ELDOF));
for(int i=0;i<gas->MSP+1;i++)
ELDOF[i] = 0.e00;
//dout
//memset(TEL,0.e00,sizeof(*TEL));
for(int i=0;i<gas->MSP+1;i++)
TEL[i] = 0.e00;
if(gas->MELE>1)
{
A=0.e00;
B=0.e00;
for(M=1;M<=gas->NELL[L];M++)
{
if(output->VARSP[5][N][L]>1.e00)
{
C=gas->QELC[2][M][L]/output->VARSP[5][N][L];
A=A+gas->QELC[1][M][L]*exp(-C);
B=B+gas->QELC[1][M][L]*C*exp(-C);
}
}
if(B>1.e-10)
{
TEL[L]=output->CS[9][N][L]/output->CS[1][N][L]/(BOLTZ*B/A);
}
else
TEL[L]=output->VAR[11][N];
output->VARSP[12][N][L]=TEL[L];
ELDOF[L]=0.e00;
if(output->VARSP[5][N][L]>1.e00)
ELDOF[L]=2.e00*output->CS[9][N][L]/output->CS[1][N][L]/(BOLTZ*output->VARSP[5][N][L]);
if(ELDOF[L]<0.01)
{
output->VARSP[12][N][L]=output->VAR[11][N];
}
}
else
{
ELDOF[L]=0.0;
}
}
}
else
{
for(K=8;K<=12;K++)
{
output->VARSP[K][N][L]=0.e00;
}
}
}
//--set the overall electronic temperature
if(gas->MELE>1)
{
C=0.e00;
for(L=1;L<=gas->MSP;L++)
{
if(ELDOF[L]>1.e-5)
C=C+output->CS[1][N][L];
}
if(C>0.e00)
{
A=0.e00;
B=0.e00;
for(L=1;L<=gas->MSP;L++)
{
if(ELDOF[L]>1.e-5)
{
A=A+output->VARSP[12][N][L]*output->CS[1][N][L];
B=B+output->CS[1][N][L];
}
}
output->VAR[22][N]=A/B;
}
else{
output->VAR[22][N]=output->VAR[11][N];
}
}
else{
output->VAR[22][N]=gas->FTMP[1];
}
if(gas->MMVM>0)
{
//--set the overall temperature and degrees of freedom for the individual species
for(L=1;L<=gas->MSP;L++)
{
if(gas->MELE>1){
SDOF[L]=3.e00+gas->ISPR[1][L]+VDOF[L]+ELDOF[L];
output->VARSP[8][N][L]=(3.0*output->VARSP[5][N][L]+gas->ISPR[1][L]*output->VARSP[6][N][L]+VDOF[L]*output->VARSP[7][N][L]+ELDOF[L]*output->VARSP[12][N][L])/SDOF[L];
}
else{
SDOF[L]=3.e00+gas->ISPR[1][L]+VDOF[L]+ELDOF[L];
output->VARSP[8][N][L]=(3.0*output->VARSP[5][N][L]+gas->ISPR[1][L]*output->VARSP[6][N][L]+VDOF[L]*output->VARSP[7][N][L])/SDOF[L];
}
}
//--the overall species temperature now includes vibrational and electronic excitation
//--the overall gas temperature can now be set
A=0.e00;
B=0.e00;
for(L=1;L<=gas->MSP;L++)
{
                    A=A+SDOF[L]*output->VARSP[8][N][L]*output->CS[1][N][L];
B=B+SDOF[L]*output->CS[1][N][L];
}
output->VAR[11][N]=A/B;
}
VEL=sqrt(pow(output->VAR[5][N],2)+pow(output->VAR[6][N],2)+pow(output->VAR[7][N],2));
            output->VAR[12][N]=VEL/sqrt((DOF+2.e00)*output->VAR[11][N]*(SUM[1]*BOLTZ/SUM[2])/DOF);
//--Mach number
output->VAR[13][N]=SUM[0]/output->NSAMP; ////--average number of molecules in cell
//dout
if(output->COLLS[N] > 2.0)
{
output->VAR[14][N]=0.5e00*(calc->FTIME-output->TISAMP)*(SUM[1]/output->NSAMP)/output->WCOLLS[N];
//--mean collision time
output->VAR[15][N]=0.92132e00*sqrt(fabs(SUM[7]/SUM[1]-UU))*output->VAR[14][N];
//--mean free path (based on r.m.s speed with correction factor based on equilibrium)
output->VAR[16][N]=output->CLSEP[N]/(output->COLLS[N]*output->VAR[15][N]);
}
else{
output->VAR[14][N]=1.e10;
output->VAR[15][N]=1.e10/output->VAR[3][N];
//--m.f.p set by nominal values
}
}
else
{
for(L=3;L<=22;L++)
{
output->VAR[L][N]=0.0;
}
}
output->VAR[17][N]=VEL;
}
    if(calc->FTIME>0.5e00*calc->DTM) //--file_3 is only opened under the same condition above
{
if(calc->ICLASS==1){
if(geom->IFX==0)
file_3<<"DSMC program for a one-dimensional plane flow"<<endl;//WRITE (3,*) 'DSMC program for a one-dimensional plane flow';
if(geom->IFX==1)
file_3<<"DSMC program for a cylindrical flow"<<endl;//WRITE (3,*) 'DSMC program for a one-dimensional plane flow';
if(geom->IFX==2)
file_3<<"DSMC program for a spherical flow"<<endl;//WRITE (3,*) 'DSMC program for a one-dimensional plane flow';
}
file_3<<endl;//WRITE (3,*)
file_3<<"Interval "<<output->NOUT<<" Time "<<calc->FTIME<< " with "<<output->NSAMP<<" samples from "<<output->TISAMP<<endl;
//WRITE (3,*) 'Interval',output->NOUT,'Time ',calc->FTIME, ' with',output->NSAMP,' samples from',output->TISAMP
//990 FORMAT(I7,G13.5,I7,G13.5)
//Dout
NNN=calc->TOTMOV;
cout<<"TOTAL MOLECULES = "<< molecs->NM<<endl;
//dout
//NMS=0;
for(int i=0;i<gas->MSP+1;i++)
NMS[i]=0;
for(N=1;N<=molecs->NM;N++)
{
M=molecs->IPSP[N];
NMS[M]+=1;
}
file_3<<"Total simulated molecules = "<<molecs->NM<<endl;
for(N=1;N<=gas->MSP;N++)
{
cout<< " SPECIES "<<N<<" TOTAL = "<<NMS[N]<<endl;
file_3<<"Species "<<N<<" total = "<<NMS[N]<<endl;
}
if(gas->MEX>0)
{
ENERGY(0,A);
for(N=1;N<=gas->MSP;N++)
{
if(gas->ISPV[N]>0){
file_9<< "SP "<<N<<" DISSOCS "<<calc->TDISS[N]<<" RECOMBS "<<calc->TRECOMB[N]<<endl;
cout<<"SP"<<N<<"DISSOCS"<<calc->TDISS[N]<<" RECOMBS "<<calc->TRECOMB[N]<<endl;
file_3<<"SP "<<N<<" DISSOCS "<<calc->TDISS[N]<<" RECOMBS "<<calc->TRECOMB[N]<<endl;
}
}
for(N=1;N<=gas->MSP;N++)
{
cout<<"EX,C reaction"<<N<<" number"<<gas->TNEX[N]<<endl;
file_9<<"EX,C reaction "<<N<<" number "<<gas->TNEX[N]<<endl;
file_3<<"EX,C reaction "<<N<<" number "<<gas->TNEX[N]<<endl;
}
}
file_3<<"Total molecule moves = "<<NNN<<endl;
//dout
NNN=calc->TOTCOL;
file_3<<"Total collision events = "<<NNN<<endl;
//
file_3<<"Species dependent collision numbers in current sample"<<endl;
for(N=1;N<=gas->MSP;N++)
{
if(gas->IGAS!=8){
for(M=1;M<=gas->MSP;M++)
file_3<<calc->TCOL[N][M]<<"\t";
file_3<<endl;
//WRITE(3,901) (calc->TCOL[N][M],M=1,gas->MSP);
}
if(gas->IGAS==8){
for(M=1;M<=gas->MSP;M++)
file_3<<calc->TCOL[N][M]<<"\t";
file_3<<endl;
// WRITE(3,902) (calc->TCOL[N][M],M=1,gas->MSP);
}
}
//Dout
//901 FORMAT(5G13.5)
//902 FORMAT(8G13.5)
//dout
    CTIME=clock();
    file_3<<"Computation time "<<(double)CTIME/(double)CLOCKS_PER_SEC<<" seconds"<<endl;
    file_3<<"Collision events per second "<<(calc->TOTCOL-calc->TOTCOLI)*(double)CLOCKS_PER_SEC/(double)CTIME<<endl;
    file_3<<"Molecule moves per second "<<(calc->TOTMOV-calc->TOTMOVI)*(double)CLOCKS_PER_SEC/(double)CTIME<<endl;
if(calc->ICLASS==0&& gas->MMVM==0&&calc->ISF==0){
        //--a homogeneous gas with no vibrational modes - assume that it is a collision test run
//******PRODUCES DATA FOR TABLES 6.1 AND 6.2 IN SECTION 6.2*******
//
A=0.e00;
B=0.e00;
C=0.e00;
for(N=1;N<=geom->NCCELLS;N++)
{
A+=geom->CCELL[5][N];
B+=geom->CCELL[4][N];
C+=geom->CCELL[3][N];
}
file_3<<"Overall time step "<<calc->DTM<<endl;
file_3<<"Molecules per collision cell "<<(double)(molecs->NM)/(double)(geom->NCCELLS)<<endl;
file_3<<"Mean cell time ratio "<< A/((double)(geom->NCCELLS)*calc->FTIME)<<endl;
file_3<<"Mean value of cross-section and relative speed "<<B/(double)(geom->NCCELLS)<<endl;
file_3<<"Mean half collision cell time step "<<C/(double)(geom->NCCELLS)<<endl;
if(gas->MSP==1){
A=2.e00*SPI*output->VAR[3][1] *(pow(gas->SP[1][1],2))*sqrt(4.e00*BOLTZ*gas->SP[2][1]/gas->SP[5][1])*pow((output->VAR[11][1])/gas->SP[2][1],(1.e00-gas->SP[3][1]));
            //--Eqn. (2.33) for equilibrium collision rate
file_3<<"Coll. rate ratio to equilib "<<calc->TCOL[1][1]/((double)(molecs->NM)*(calc->FTIME-output->TISAMP))/A<<endl;
}
else{
file_3<<"Species collision rate ratios to equilibrium"<<endl;
for(N=1;N<=gas->MSP;N++){
file_3<<"Collision rate for species "<<N<<endl;
for(M=1;M<=gas->MSP;M++)
{
THCOL[N][M]=2.e00*(1.e00/SPI)*output->VAR[3][1]*output->VARSP[1][1][M]*gas->SPM[2][N][M]*sqrt(2.e00*BOLTZ*gas->SPM[5][N][M]/gas->SPM[1][N][M])*pow(output->VAR[11][1]/gas->SPM[5][N][M],1.e00-gas->SPM[3][N][M]);
                    //--Eqn. (2.36) for equilibrium collision rate of species N with species M
file_3<<"with species "<<M<<" "<<calc->TCOL[N][M]/((double)(molecs->NM)*gas->FSP[N][1]*(calc->FTIME-output->TISAMP))/THCOL[N][M]<<endl;
}
}
file_3<<endl;
for(N=1;N<=gas->MSP;N++){
file_3<<"Collision numbers for species "<<N<<endl;
for(M=1;M<=gas->MSP;M++){
file_3<<"with species "<<M<<" "<<calc->TCOL[N][M]<<endl;
}
}
}
}
file_3<<endl;
if(geom->ITYPE[1]==2|| geom->ITYPE[2]==1)
file_3<<"Surface quantities"<<endl;
for(JJ=1;JJ<=2;JJ++)
{
if(geom->ITYPE[JJ]==2){
file_3<<endl;
file_3<<"Surface at "<<geom->XB[JJ]<<endl;
file_3<<"Incident sample "<<output->VARS[0][JJ]<<endl;
file_3<<"Number flux "<<output->VARS[3][JJ]<<" /sq m/s"<<endl;
file_3<<"Inc pressure "<<output->VARS[5][JJ]<<" Refl pressure "<<output->VARS[6][JJ]<<endl;
file_3<<"Pressure "<< output->VARS[5][JJ]+output->VARS[6][JJ]<<" N/sq m"<<endl;
file_3<<"Inc y shear "<<output->VARS[7][JJ]<<" Refl y shear "<<output->VARS[8][JJ]<<endl;
file_3<<"Net y shear "<<output->VARS[7][JJ]-output->VARS[8][JJ]<<" N/sq m"<<endl;
file_3<<"Net z shear "<<output->VARS[9][JJ]-output->VARS[10][JJ]<<" N/sq m"<<endl;
file_3<<"Incident translational heat flux "<<output->VARS[11][JJ]<<" W/sq m"<<endl;
if(gas->MMRM>0)
file_3<<"Incident rotational heat flux "<<output->VARS[13][JJ]<<" W/sq m"<<endl;
if(gas->MMVM>0)
file_3<<"Incident vibrational heat flux "<<output->VARS[15][JJ]<<" W/sq m"<<endl;
if(gas->MELE>1)
file_3<<"Incident electronic heat flux "<<output->VARS[33][JJ]<<" W/sq m"<<endl;
file_3<<"Total incident heat flux "<<output->VARS[29][JJ]<<" W/sq m"<<endl;
file_3<<"Reflected translational heat flux "<<output->VARS[12][JJ]<<" W/sq m"<<endl;
if(gas->MMRM>0)
file_3<<"Reflected rotational heat flux "<<output->VARS[14][JJ]<<" W/sq m"<<endl;
if(gas->MMVM>0)
file_3<<"Reflected vibrational heat flux "<<output->VARS[16][JJ]<<" W/sq m"<<endl;
if(gas->MELE>1)
file_3<<"Reflected electronic heat flux "<<output->VARS[34][JJ]<<" W/sq m"<<endl;
file_3<<"Total reflected heat flux "<<output->VARS[30][JJ]<<" W/sq m"<<endl;
file_3<<"Net heat flux "<<output->VARS[31][JJ]<<" W/sq m"<<endl;
file_3<<"Slip velocity (y direction) "<<output->VARS[19][JJ]<<" m/s"<<endl;
file_3<<"Translational temperature slip"<<output->VARS[20][JJ]<<" K"<<endl;
if(gas->MMRM>0)
file_3<<"Rotational temperature slip "<<output->VARS[21][JJ]<<" K"<<endl;
if(gas->MSP>1)
{
for(L=1;L<=gas->MSP;L++)
{
file_3<<"Species "<<L<<" percentage "<<output->VARS[L+35][JJ]<<endl;
}
}
}
}
file_3<<endl;
//PPA=0;
for(int i=0;i<gas->MSP+1;i++)
PPA[i]=0;
for(N=1;N<=geom->NCELLS;N++)
{
for(M=1;M<=gas->MSP;M++){
PPA[M]=PPA[M]+output->VARSP[0][N][M];
}
}
// WRITE (*,*)
//cin.get();
if(gas->MSP>1)
{
file_3<<"GAINS FROM REACTIONS"<<endl;
file_3<<" Dissoc. Recomb. Endo. Exch. Exo. Exch."<<endl;
for(M=1;M<=gas->MSP;M++){
file_3<<" SPECIES "<<M<<" "<<gas->TREACG[1][M]<<" "<<gas->TREACG[2][M]<<" "<<gas->TREACG[3][M]<<" "<<gas->TREACG[4][M]<<endl;
}
file_3<<endl;
file_3<<"LOSSES FROM REACTIONS"<<endl;
file_3<<" Dissoc. Recomb. Endo. Exch. Exo. Exch."<<endl;
for(M=1;M<=gas->MSP;M++){
file_3<<" SPECIES "<<M<<" "<<gas->TREACL[1][M]<<" "<<gas->TREACL[2][M]<<" "<<gas->TREACL[3][M]<<" "<<gas->TREACL[4][M]<<endl;
}
file_3<<endl;
file_3<<"TOTALS"<<endl;
for(M=1;M<=gas->MSP;M++){
file_3<<" SPECIES "<<M<<" GAINS "<<gas->TREACG[1][M]+gas->TREACG[2][M]+gas->TREACG[3][M]+gas->TREACG[4][M]<<" LOSSES "<<gas->TREACL[1][M]+gas->TREACL[2][M]+gas->TREACL[3][M]+gas->TREACL[4][M]<<endl;
}
}
file_3<<endl;
file_3<<"Flowfield properties "<<endl;
file_3<< output->NSAMP<<" Samples"<<endl;
file_3<<"Overall gas"<<endl;
file_3<<"Cell x coord. Sample Number Dens. Density u velocity v velocity w velocity Trans. Temp. Rot. Temp. Vib. Temp. El. Temp. Temperature Mach no. Mols/cell m.c.t m.f.p mcs/mfp speed Pressure TTX TTY TTZ Species Fractions "<<endl;
for(N=1;N<=geom->NCELLS;N++)
{
file_3<< N<<" ";
for(M=1;M<=10;M++){
file_3<<output->VAR[M][N]<<" ";
}
file_3<<output->VAR[22][N]<<" ";
for(M=11;M<=21;M++){
file_3<<output->VAR[M][N]<<" ";
}
            for(L=1;L<=gas->MSP;L++){
file_3<<output->VARSP[1][N][L]<<" ";
}
file_3<<endl;
}
file_3<<"Individual molecular species"<<endl;
for(L=1;L<=gas->MSP;L++){
file_3<<"Species "<<L<<endl;
file_3<<"Cell x coord. Sample Percentage Species TTx Species TTy Species TTz Trans. Temp. Rot. Temp. Vib. Temp. Spec. Temp u Diff. Vel. v Diff. Vel. w. Diff. Vel. Elec. Temp."<<endl;
for(N=1;N<=geom->NCELLS;N++){
file_3<< N<<" "<<output->VAR[1][N]<<" ";
for(M=0;M<=12;M++)
file_3<<output->VARSP[M][N][L]<<" ";
file_3<<endl;
}
}
//dout
//999 FORMAT (I5,30G13.5)
//998 FORMAT (G280.0)
// 997 FORMAT (G188.0)
// CLOSE (3)
file_3.close();
}
if(calc->ICLASS==0 && calc->ISF==1){
//--a homogeneous gas and the "unsteady sampling" option has been chosen-ASSUME THAT IT IS A RELAXATION TEST CASE FOR SECTION 6.2
INITIALISE_SAMPLES();
//write a special output file for internal temperatures and temperature versus collision number
//dout
file_10.open("RELAX.DAT", ios::app | ios::out);
if(file_10.is_open()){
cout<<"RELAX.DAT is opened"<<endl;
}
else{
cout<<"RELAX.DAT not opened"<<endl;
}
// OPEN (10,FILE='RELAX.DAT',ACCESS='APPEND')
A=2.0*calc->TOTCOL/molecs->NM; //--mean collisions
//--VAR(11,N) //--overall
//--VAR(8,N) //--translational
//--VAR(9,N) //--rotational
//--VAR(10,N) //--vibrational
//--VAR(22,N) //--electronic
file_10<<setw(15)<<A<<setw(15)<<output->VAR[8][1]<<setw(15)<<output->VAR[9][1]<<setw(15)<<output->VAR[8][1]-output->VAR[9][1]<<endl;
//file_10<<A<<"\t"<<output->VAR[11][1]<<"\t"<<output->VAR[8][1]<<"\t"<<output->VAR[9][1]<<"\t"<<output->VAR[10][1]<<"\t"<<output->VAR[22][1]<<endl;
//file_10<<A<<"\t"<<output->VAR[8][1]<<"\t"<<output->VAR[9][1]<<"\t"<<output->VAR[8][1]-output->VAR[9][1]<<endl;
// WRITE (10,950) A,VAR(8,1),VAR(9,1),VAR(8,1)-VAR(9,1) //--Generates output for Figs. 6.1 and 6.2
// WRITE (10,950) A,VAR(11,1),VAR(8,1),VAR(9,1),VAR(10,1),VAR(22,1) //--Generates output for modal temperatures in Figs. 6.3, 6.5 +
// WRITE (10,950) A,0.5D00*(VAR(8,1)+VAR(9,1)),VAR(10,1),0.5D00*(VAR(8,1)+VAR(9,1))-VAR(10,1) //--Generates output for Figs. 6.4
//
//--VARSP(8,N,L) //--overall temperature of species L
// WRITE (10,950) A,VARSP(8,1,3),VARSP(8,1,2),VARSP(8,1,5),VARSP(8,1,4),A //--output for Fig 6.17
// CLOSE (10)
file_10.close();
}
//dout
// 950 FORMAT (6G13.5)
if(gas->IGAS==8||gas->IGAS==6||gas->IGAS==4)
{
//--Write a special output file for the composition of a reacting gas as a function of time
//dout
//OPEN (10,FILE='COMPOSITION.DAT',ACCESS='APPEND')
file_10.open("COMPOSITION.DAT", ios::app | ios::out);
if(file_10.is_open()){
cout<<"COMPOSITION.DAT is opened"<<endl;
}
else{
cout<<"COMPOSITION.DAT not opened"<<endl;
}
AS=molecs->NM;
//dout
AT=calc->FTIME*1.e6;
if (gas->IGAS == 4)
file_10<< AT <<" "<<(double)(NMS[1])/1000000<<" "<<A<<" "<<output->VAR[11][1]<<endl; //--Data for fig
if (gas->IGAS == 8)
file_10<<AT<<" "<<NMS[1]/AS<<" "<<NMS[2]/AS<<" "<<NMS[3]/AS<<" "<<NMS[4]/AS<<" "<<NMS[5]/AS<<" "<<NMS[6]/AS<<" "<<NMS[7]/AS<<" "<<NMS[8]/AS<<" "<<output->VAR[11][1]<<endl;
if (gas->IGAS == 6)
file_10<<AT<<" "<<NMS[1]/AS<<" "<<NMS[2]/AS<<" "<<NMS[3]/AS<<" "<<NMS[4]/AS<<" "<<NMS[5]/AS<<" "<<output->VAR[11][1]<<endl;
//dout
// 888 FORMAT(10G13.5)
file_10.close();
}
if(calc->FTIME>0.5e00*calc->DTM){
//
//--reset collision and transit times etc.
//
cout<<"Output files written "<<endl;
DTMI=calc->DTM;
if(calc->IMTS<2){
if(calc->ICLASS>0)
calc->DTM*=2;
//--this makes it possible for DTM to increase, it will be reduced as necessary
for(NN=1;NN<=geom->NCELLS;NN++)
{
CDTM[NN]=calc->DTM;
B=geom->CELL[3][NN]-geom->CELL[2][NN] ;//--sampling cell width
if(output->VAR[13][NN]>20.e00){
//consider the local collision rate
CDTM[NN]=output->VAR[14][NN]*calc->CPDTM;
//look also at sampling cell transit time based on the local flow speed
A=(B/(fabs(output->VAR[5][NN])))*calc->TPDTM;
if(A<CDTM[NN])
CDTM[NN]=A;
}
else{
                //-- base the time step on a sampling cell transit time at the reference vmp
A=calc->TPDTM*B/gas->VMPM;
if(A<CDTM[NN])
CDTM[NN]=A;
}
if(CDTM[NN]<calc->DTM)
calc->DTM=CDTM[NN];
}
}
else
{
//dout
//memset(CDTM, calc->DTM, sizeof(*CDTM));
for(int i=0;i<geom->NCELLS+1;i++)
CDTM[i]= calc->DTM;
//CDTM=calc->DTM;
}
for(N=1;N<=geom->NCELLS;N++){
NN=geom->ICCELL[3][N];
geom->CCELL[3][N]=0.5*CDTM[NN];
}
file_9<<"DTM changes from "<<DTMI<<" to "<<calc->DTM<<endl;
calc->DTSAMP=calc->DTSAMP*calc->DTM/DTMI;
calc->DTOUT=calc->DTOUT*calc->DTM/DTMI;
}
else
{
INITIALISE_SAMPLES();
}
if(calc->ICLASS==1&& calc->ISF==1)
{
//*************************************************************************
//--write TECPLOT data files for x-t diagram (unsteady calculation only)
//--comment out if not needed
//dout
file_18.open("DS1xt.DAT", ios::app | ios::out);
if(file_18.is_open()){
cout<<"DS1xt.DAT is opened"<<endl;
}
else
cout<<"DS1xt.DAT not opened"<<endl;
// OPEN (18,FILE='DS1xt.DAT',ACCESS='APPEND')
        //--make sure that it is empty at the start of the run
SETXT();
// CLOSE (18)
file_18.close();
//**************************************************************************
}
//WRITE (19,*) calc->FTIME,-output->VARS[5][1],-output->VARS[5][1]-output->VARS[6][1]
file_7.open("PROFILE.DAT" , ios::out);
if(file_7.is_open()){
cout<<"PROFILE.DAT is opened"<<endl;
}
else
cout<<"PROFILE.DAT not opened"<<endl;
// OPEN (7,FILE='PROFILE.DAT',FORM='FORMATTED')
//
//OPEN (8,FILE='ENERGYPROF.DAT',FORM='FORMATTED')
//
// 995 FORMAT (22G13.5)
// 996 FORMAT (12G14.6)
for(N=1;N<=geom->NCELLS;N++)
{
//
//--the following line is the default output
// WRITE (7,995) VAR(1,N),VAR(4,N),VAR(3,N),VAR(11,N),VAR(18,N),VAR(5,N),VAR(12,N),VAR(8,N),VAR(9,N),VAR(10,N),VAR(22,N), &
// (VARSP(8,N,M),M=1,MSP),(VARSP(1,N,M),M=1,MSP)
//
//--calculate energies per unit mass (employed for re-entry shock wave in Section 7.5)
C1=0.5e00*pow(output->VAR[5][N],2); //--Kinetic
C2=0.e00; //--Thermal
C3=0.e00; //--Rotational
C4=0.e00; //--Vibrational
C5=0.e00; //--Electronic
C6=0.e00; //--Formation
for(L=1;L<=gas->MSP;L++)
{
// C2=C2+3.D00*BOLTZ*VARSP(5,N,L)*VARSP(1,N,L)/SP(5,L)
A=(output->CS[1][N][L]/output->VARSP[1][N][L])*gas->SP[5][L];
if(output->CS[1][N][L]>0.5e00){
C2=C2+0.5e00*(output->CS[5][N][L]+output->CS[6][N][L]+output->CS[7][N][L])*gas->SP[5][L]/A;
if(gas->ISPR[1][L]>0)
C3=C3+output->CS[8][N][L];
if(gas->ISPV[L]>0)
C4=C4+output->CS[10][N][L]*BOLTZ*gas->SPVM[1][1][L]/A;
if(gas->NELL[L]>1)
C5=C5+output->CS[9][N][L]/A;
C6=C6+gas->SP[6][L]*output->CS[1][N][L]/A;
}
}
C2=C2-C1;
// A=0.5D00*VFX(1)**2+2.5D00*BOLTZ*FTMP(1)/(0.75*SP(5,2)+0.25*SP(5,1))
C7=C1+C2+C3+C4+C5+C6;
//
// WRITE (8,995) VAR(1,N),C1/A,C2/A,C3/A,C4/A,C5/A,C6/A,C7/A
//
//--the following lines are for normalised shock wave output in a simple gas (Sec 7.3)
C1=gas->FND[2]-gas->FND[1];
C2=gas->FTMP[2]-gas->FTMP[1];
file_7<<output->VAR[1][N]<<" "<<output->VAR[2][N]<<" "<<(0.5*(output->VAR[20][N]+output->VAR[21][N])-gas->FTMP[1])/C2<<" "<<(output->VAR[19][N]-gas->FTMP[1])/C2<<" "<<(output->VAR[11][N]-gas->FTMP[1])/C2<<" "<<(output->VAR[3][N]-gas->FND[1])/C1<<endl;
//--the following replaces sample size with density
//C3=0.D00
//DO L=1,MSP
// C3=C3+FND(1)*FSP(L,1)*SP(5,L) //--upstream density
//END DO
//C4=0.D00
//DO L=1,MSP
// C4=C4+FND(2)*FSP(L,2)*SP(5,L) //--upstream density
//END DO
//
// WRITE (7,996) VAR(1,N),(VAR(4,N)-C3)/(C4-C3),(0.5*(VAR(20,N)+VAR(21,N))-FTMP(1))/C2,(VAR(19,N)-FTMP(1))/C2,(VAR(11,N)-FTMP(1))/C2, &
// (VAR(3,N)-FND(1))/C1
//--the following lines is for a single species in a gas mixture
// C1=C1*FSP(3,1)
// WRITE (7,996) VAR(1,N),VARSP(1,N,3),(0.5*(VARSP(3,N,3)+VARSP(4,N,3))-FTMP(1))/C2,(VARSP(2,N,3)-FTMP(1))/C2,(VARSP(5,N,3)-FTMP(1))/C2,(VAR(3,N)*VARSP(1,N,3)-FND(1)*FSP(3,1))/C1
//
//--the following line is for Couette flow (Sec 7.4)
// WRITE (7,996) VAR(1,N),VAR(2,N),VAR(5,N),VAR(6,N),VAR(7,N),VAR(11,N)
//--the following line is for the breakdown of equilibrium in expansions (Sec 7.10)
// WRITE (7,996) VAR(1,N),VAR(2,N),VAR(12,N),VAR(4,N),VAR(5,N),VAR(8,N),VAR(9,N),VAR(10,N),VAR(11,N),VAR(19,N),VAR(20,N),VAR(21,N)
//
}
if(calc->ISF==1)
INITIALISE_SAMPLES();
// CLOSE(7)
file_7.close();
//
//--deallocate local variables
//
//dout
for(int i=0;i<gas->MMVM+1;i++){
delete [] TV[i];
}
delete [] TV;
delete [] TVIB;
delete [] VDOF;
for(int i=0;i<gas->MSP+1;i++){
delete [] THCOL[i];
}
delete [] THCOL;
// DEALLOCATE (TV,TVIB,VDOF,THCOL,STAT=ERROR)
// if(calc->ERROR)
// cout<<"PROGRAM COULD NOT DEALLOCATE OUTPUT VARIABLES"<<calc->ERROR;
calc->TOUT=calc->TOUT+calc->DTOUT;
return;
}
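//
//--The routine below uses Bird's NTC (no-time-counter) scheme: the number of pair selections in a
//--collision cell over a time step DTC is ASEL=0.5*Nc*(Nc-1)*WFC*FNUM*(sigma*cr)max*DTC/Vc.
//--The helper that follows is a minimal illustrative sketch of that formula only; it is not called
//--by the solver and the name NTC_SELECTIONS is hypothetical.
static double NTC_SELECTIONS(int NC, double FNUM, double SIGMACRMAX, double DTC, double VC, double WFC)
{
    //NC number of simulated molecules in the collision cell
    //FNUM real molecules represented by each simulated molecule
    //SIGMACRMAX current maximum of (cross-section * relative speed) in the cell
    //DTC collision time step, VC collision cell volume, WFC cell weighting factor
    return 0.5e00*(double)NC*(double)(NC-1)*WFC*FNUM*SIGMACRMAX*DTC/VC;
}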
void COLLISIONS()
{
//CALC calc;
//MOLECS molecs;
//GAS gas;
//OUTPUT output;
//GEOM_1D geom;
start= clock();
double duration;
int N,NN,M,MM,L,LL,K,KK,KT,J,I,II,III,NSP,MAXLEV,IV,NSEL,KV,LS,MS,KS,JS,IIII,LZ,KL,IS,IREC,NLOOP,IA,IDISS,IEX,NEL,NAS,NPS,
JJ,LIMLEV,KVV,KW,INIL,INIM,JI,LV,IVM,NMC,NVM,LSI,JX,MOLA,KR,JKV,NSC,KKV,IAX,NSTEP,NTRY,NLEVEL,NSTATE,IK,NK,MSI ;
double A,AA,AAA,AB,B,BB,BBB,ABA,ASEL,DTC,SEP,VR,VRR,ECT,EVIB,ECC,ZV,ERM,C,OC,SD,D,CVR,PROB,RML,RMM,ECTOT,ETI,EREC,ET2,
XMIN,XMAX,WFC,CENI,CENF,VRRT,EA,DEN,E1,E2,VRI,VRA ;
double VRC[4],VCM[4],VRCP[4],VRCT[4];
// //N,M,K working integer
// //LS,MS,KS,JS molecular species
// //VRC components of the relative velocity
// //RML,RMM molecule mass parameters
// //VCM components of the center of mass velocity
// //VRCP post-collision components of the relative velocity
// //SEP the collision partner separation
// //VRR the square of the relative speed
// //VR the relative speed
// //ECT relative translational energy
// //EVIB vibrational energy
// //ECC collision energy (rel trans +vib)
// //MAXLEV maximum vibrational level
// //ZV vibration collision number
// //SDF the number of degrees of freedom associated with the collision
// //ERM rotational energy
// //NSEL integer number of selections
// //NTRY number of attempts to find a second molecule
// //CVR product of collision cross-section and relative speed
// //PROB a probability
// //KT third body molecule code
// //ECTOT energy added at recombination
// //IREC initially 0, becomes 1 if a recombination occurs
// //WFC weighting factor in the cell
// //IEX is the reaction that occurs (1 if only one is possible)
// //EA activation energy
// //NPS the number of possible electronic states
// //NAS the number of available electronic states
//cout<<"START COLLISIONS"<<endl;
// dout
cout<<geom->XB[1]<<" "<<geom->XB[2]<<endl;
for( N=1;N<=geom->NCCELLS;N++)
{
if((calc->FTIME-geom->CCELL[5][N])>geom->CCELL[3][N])
{
// cout<<N <<" "<<geom->CCELL[3][N]<<endl;
DTC=2.e00*geom->CCELL[3][N];
//calculate collisions appropriate to time DTC
if(geom->ICCELL[2][N]>1)
{
//no collisions calculated if there are less than two molecules in collision cell
NN=geom->ICCELL[3][N];
WFC=1.e00;
if(geom->IWF==1 && geom->IVB==0)
{
//dout
WFC=1.e00+geom->WFM*pow(geom->CELL[1][NN],geom->IFX);
}
geom->CCELL[5][N]=geom->CCELL[5][N]+DTC;
if(geom->IVB==0)
{
AAA=geom->CCELL[1][N];
}
if(geom->IVB==1)
{
C=(geom->XB[2]+geom->VELOB*calc->FTIME-geom->XB[1])/(double)(geom->NDIV*geom->NCIS);
//dout
XMIN=geom->XB[1]+(double)(N-1)*C;
XMAX=XMIN+C;
//dout
WFC=1.e00+geom->WFM*pow((0.5e00*(XMIN+XMAX)),geom->IFX);
if(geom->IFX==0)
{
AAA=XMAX-XMIN;
}
if(geom->IFX==1)
{
AAA=PI*(pow(XMAX,2)-pow(XMIN,2)); //assumes unit length of full cylinder
}
if(geom->IFX==2)
{
AAA=1.33333333333333333333e00*PI*(pow(XMAX,3)-pow(XMIN,3)); //flow is in the full sphere
}
}
//these statements implement the N(N-1) scheme
ASEL=0.5e00*geom->ICCELL[2][N]*(geom->ICCELL[2][N]-1)*WFC*calc->FNUM*geom->CCELL[4][N]*DTC/AAA+geom->CCELL[2][N];
NSEL=ASEL;
//dout
geom->CCELL[2][N]=ASEL-(double)(NSEL);
if(NSEL>0)
{
I=0; //counts the number of selections
KL=0; //becomes 1 if it is the last selection
IIII=0; //becomes 1 if there is a recombination
for(KL=1;KL<=NSEL;KL++)
{
I=I+1;
III=0; //becomes 1 if there is no valid collision partner
if(geom->ICCELL[2][N]==2)
{
K=1+geom->ICCELL[1][N];
//dout
L=molecs->ICREF[K];
K=2+geom->ICCELL[1][N];
//dout
M=molecs->ICREF[K];
if(M==molecs->IPCP[L])
{
III=1;
geom->CCELL[5][N]=geom->CCELL[5][N]-DTC;
}
}
else
{
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
K=(int)(calc->RANF*(double)(geom->ICCELL[2][N]))+geom->ICCELL[1][N]+1;
//dout
L=molecs->ICREF[K];
//one molecule has been selected at random
if(calc->NNC==0)
{
//select the collision partner at random
M=L;
NTRY=0;
while(M==L)
{
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
K=(int)(calc->RANF*(double)(geom->ICCELL[2][N]))+geom->ICCELL[1][N]+1;
M=molecs->ICREF[K];
if(M==molecs->IPCP[L])
{
if(NTRY<5*geom->ICCELL[2][N])
{
M=L;
}
else
{
III = 1;
geom->CCELL[5][N]=geom->CCELL[5][N]-DTC/ASEL;
M=L+1;
}
}
}
}
else
{
//select the nearest from the total number (< 30) or a random 30
if(geom->ICCELL[2][N]<30)
{
LL=geom->ICCELL[2][N];
}
else
{
LL=30;
}
SEP=1.0e10;
M=0;
for(J=1;J<=LL;J++)
{
if(LL<30)
{
K=J+geom->ICCELL[1][N];
}
else
{
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
K=(int)(calc->RANF*(double)(geom->ICCELL[2][N]))+geom->ICCELL[1][N]+1;
}
MM=molecs->ICREF[K];
if(MM != L)
{
//exclude the already selected molecule
if(MM != molecs->IPCP[L])
{
//exclude the previous collision partner
//dout
A=fabs(molecs->PX[1][L]-molecs->PX[1][MM]);
if(A<SEP&& A>1.e-8*geom->DDIV)
{
M=MM;
SEP=A;
}
}
}
}
}
}
if(III==0)
{
for(KK=1;KK<=3;KK++)
{
VRC[KK]=molecs->PV[KK][L]-molecs->PV[KK][M];
}
VRR=VRC[1]*VRC[1]+VRC[2]*VRC[2]+VRC[3]*VRC[3];
VR=sqrt(VRR);
VRI=VR;
//Simple GAs
if(gas->MSP==1)
{
//dout
CVR=VR*gas->CXSS*pow(2.e00*BOLTZ*gas->SP[2][1]/(gas->RMAS*VRR),(gas->SP[3][1]-0.5e00))*gas->RGFS;
if(CVR>geom->CCELL[4][N])
{
geom->CCELL[4][N]=CVR;
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF<CVR/geom->CCELL[4][N])
{
// the collision occurs
if(M==molecs->IPCP[L]&& L==molecs->IPCP[M])
{
file_9<<"Duplicate collision"<<endl;
}
calc->TOTCOL=calc->TOTCOL+1.e00;
calc->TCOL[1][1]=calc->TCOL[1][1]+2.e00;
output->COLLS[NN]=output->COLLS[NN]+1.e000;
output->WCOLLS[NN]=output->WCOLLS[NN]+WFC;
//dout
SEP=fabs(molecs->PX[1][L]-molecs->PX[1][M]);
output->CLSEP[NN]=output->CLSEP[NN]+SEP;
if(gas->ISPR[1][1]>0)
{
//Larsen-Borgnakke serial redistribution
ECT=0.5e00*gas->RMAS*VRR;
for(NSP=1;NSP<=2;NSP++)
{
//consider the molecules in turn
if(NSP==1)
{
K=L;
}
else
{
K=M;
}
if(gas->MMVM>0)
{
if(gas->ISPV[1]>0)
{
for(KV=1;KV<=gas->ISPV[1];KV++)
{
EVIB=(double)(molecs->IPVIB[KV][K]*BOLTZ*gas->SPVM[1][KV][1]);
ECC=ECT+EVIB;
if(gas->SPVM[3][KV][1]>0.0)
{
MAXLEV=ECC/(BOLTZ*gas->SPVM[1][KV][1]);
B=gas->SPVM[4][KV][1]/gas->SPVM[3][KV][1]; //Tdiss/Tref
A= gas->SPVM[4][KV][1]/output->VAR[8][NN] ;//Tdiss/Ttrans
//ZV=(A**SPM(3,1,1))*(SPVM(3,KV,1)*(B**(-SPM(3,1,1))))**(((A**0.3333333D00)-1.D00)/((B**0.33333D00)-1.D00))
ZV=pow(A,gas->SPM[3][1][1])*pow(gas->SPVM[3][KV][1]*pow(B,-gas->SPM[3][1][1]),((pow(A,0.3333333e00)-1e00)/(pow(B,0.33333e00)-1.e00)));
}
else
{
ZV=gas->SPVM[2][KV][1];
MAXLEV=ECC/(BOLTZ*gas->SPVM[1][KV][1])+1;
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(1.e00/ZV>calc->RANF)
{
II=0;
while(II==0)
{
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
IV=calc->RANF*(MAXLEV+0.99999999e00);
molecs->IPVIB[KV][K]=IV;
EVIB=(double)(IV)*BOLTZ*gas->SPVM[1][KV][1];
if(EVIB<ECC)
{
PROB=pow((1.e00-EVIB/ECC),(1.5e00-gas->SPM[3][1][1]));
//PROB is the probability ratio of eqn (3.28)
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(PROB>calc->RANF)
II=1;
}
}
ECT=ECC-EVIB;
}
}
}
}
//now rotation of this molecule
//dout
if(gas->ISPR[1][1] > 0)
{
if(gas->ISPR[2][1]==0)
{
B=1.e00/gas->SPR[1][1];
}
else //use molecule rather than mean value
{
B=1.e00/(gas->SPR[1][1]+gas->SPR[2][1]*output->VAR[8][NN]+gas->SPR[3][1]*pow(output->VAR[8][NN],2));
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(B>calc->RANF)
{
ECC=ECT +molecs->PROT[K];
if(gas->ISPR[1][1]==2)
{
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
ERM=1.e00-pow(calc->RANF,1.e00/(2.5e00-gas->SP[3][1])); //eqn(5.46)
}
else
{
//dout
LBS(0.5e00*gas->ISPR[1][1]-1.e00,1.5e00-gas->SP[3][1],ERM);
}
molecs->PROT[K]=ERM*ECC;
ECT=ECC-molecs->PROT[K];
}
}
}
//adjust VR for the change in energy;
VR=sqrt(2.e00*ECT/gas->SPM[1][1][1]);
}
//end of L-B redistribution
for(KK=1;KK<=3;KK++)
{
VCM[KK]=0.5e00*(molecs->PV[KK][L]+molecs->PV[KK][M]);
}
//dout
if(fabs(gas->SP[4][1]-1.0) < 0.001)
{
//use the VHS logic //dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
B=2.e00*calc->RANF-1.e00;
//B is the cosine of a random elevation angle
A=sqrt(1.e00-B*B);
VRCP[1]=B*VR;
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
C=2.e00*PI*calc->RANF;
//C is a random azimuth angle
//dout
VRCP[2]=A*cos(C)*VR;
VRCP[3]=A*sin(C)*VR;
}
else
{
//use the VSS logic //dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
B=2.e00*(pow(calc->RANF,gas->SP[4][1]))-1.e00;
//B is the cosine of the deflection angle for the VSS model (Eqn. 11.8) of Bird(1994))
A=sqrt(1.e00-B*B);
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
C=2.e00*PI*calc->RANF;
//dout
OC=(double)cos(C);
SD=(double)sin(C);
D=sqrt(pow(VRC[2],2)+pow(VRC[3],2));
VRA=VR/VRI;
VRCP[1]=(B*VRC[1]+A*SD*D)*VRA;
VRCP[2]=(B*VRC[2]+A*(VRI*VRC[3]*OC-VRC[1]*VRC[2]*SD)/D)*VRA;
VRCP[3]=(B*VRC[3]-A*(VRI*VRC[2]*OC+VRC[1]*VRC[3]*SD)/D)*VRA;
//the post-collision rel. velocity components are based on eqn (3.18)
}
for(KK=1;KK<=3;KK++)
{
molecs->PV[KK][L]=VCM[KK]+0.5e00*VRCP[KK];
molecs->PV[KK][M]=VCM[KK]-0.5e00*VRCP[KK];
}
molecs->IPCP[L]=M;
molecs->IPCP[M]=L;
}
} //collision occurrence
else
{
//Gas Mixture
LS=fabs(molecs->IPSP[L]);
MS=fabs(molecs->IPSP[M]);
CVR=VR*gas->SPM[2][LS][MS]*pow(((2.e00*BOLTZ*gas->SPM[5][LS][MS])/((gas->SPM[1][LS][MS])*VRR)),(gas->SPM[3][LS][MS]-0.5e00))*gas->SPM[6][LS][MS];
if(CVR>geom->CCELL[4][N])
{
geom->CCELL[4][N]=CVR;
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF<CVR/geom->CCELL[4][N] && molecs->IPCELL[L]>0 && molecs->IPCELL[M]>0)
{
//the collision occurs (-ve IPCELL indicates recombined molecule marked for removal)
if(M==molecs->IPCP[L] && L==molecs->IPCP[M])
{
file_9<<"Duplicate collision";
}
calc->TOTCOL=calc->TOTCOL+1.e00;
calc->TCOL[LS][MS]=calc->TCOL[LS][MS]+1.e00;
calc->TCOL[MS][LS]=calc->TCOL[MS][LS]+1.e00;
output->COLLS[NN]=output->COLLS[NN]+1.e00;
output->WCOLLS[NN]=output->WCOLLS[NN]+WFC;
SEP=fabs(molecs->PX[1][L]-molecs->PX[1][M]);
output->CLSEP[NN]=output->CLSEP[NN]+SEP;
RML=gas->SPM[1][LS][MS]/gas->SP[5][MS];
RMM=gas->SPM[1][LS][MS]/gas->SP[5][LS];
for(KK=1;KK<=3;KK++)
{
VCM[KK]=RML*molecs->PV[KK][L]+RMM*molecs->PV[KK][M];
}
IDISS=0;
IREC=0;
IEX=0;
//check for dissociation
if(gas->ISPR[1][LS]>0 || gas->ISPR[1][MS]>0)
{
ECT=0.5e00*gas->SPM[1][LS][MS]*VRR;
for(NSP=1;NSP<=2;NSP++)
{
if(NSP==1)
{
K=L; KS=LS; JS=MS;
}
else
{
K=M ; KS=MS ; JS=LS;
}
if(gas->MMVM>0)
{
if(gas->ISPV[KS]>0)
{
for(KV=1;KV<=gas->ISPV[KS];KV++)
{
if(molecs->IPVIB[KV][K]>=0 && IDISS==0)
{
//do not redistribute to a dissociating molecule marked for removal
EVIB=(double)(molecs->IPVIB[KV][K]*BOLTZ*gas->SPVM[1][KV][KS]);
ECC=ECT+EVIB;
MAXLEV=ECC/(BOLTZ*gas->SPVM[1][KV][KS]);
LIMLEV=gas->SPVM[4][KV][KS]/gas->SPVM[1][KV][KS];
if(MAXLEV > LIMLEV)
{
//dissociation occurs subject to reduction factor - reflects the infinity of levels past the dissociation limit
//dout
// RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF<gas->SPVM[5][KV][KS])
{
IDISS=1;
LZ=molecs->IPVIB[KV][K];
output->NDISSL[LZ]=output->NDISSL[LZ]+1;
ECT=ECT-BOLTZ*gas->SPVM[4][KV][KS]+EVIB;
//adjust VR for the change in energy
VRR=2.e00*ECT/gas->SPM[1][LS][MS];
VR=sqrt(VRR);
molecs->IPVIB[KV][K]=-1;
//a negative IPVIB marks a molecule for dissociation
}
}
}
}
}
}
}
}
IEX=0; //becomes the reaction number if a reaction occurs
IREC=0; //becomes 1 if a recombination occurs
if(IDISS==0)
{
//dissociation has not occurred
//consider possible recombinations
if(gas->ISPRC[LS][MS]>0 && geom->ICCELL[2][N]>2)
{
//possible recombination using model based on collision volume for equilibrium
KT=L;
//NTRY=0
while(KT==L||KT==M)
{
NTRY+=1;
// if(NTRY>100)
// {
// cout>>"NTRY 3rd body"<<NTRY;
// }
//RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
K=(int)(calc->RANF*(double)(geom->ICCELL[2][N]))+geom->ICCELL[1][N]+1;
KT=molecs->ICREF[K];
}
KS=molecs->IPSP[KT];
//the potential third body is KT OF species KS
AA=(PI/6.e00)*pow((gas->SP[1][LS]+gas->SP[1][MS]+gas->SP[1][KS]),3); //reference volume
BB=AA*gas->SPRC[1][LS][MS][KS]*pow(output->VAR[8][NN]/gas->SPVM[1][gas->ISPRK[LS][MS]][gas->ISPRC[LS][MS]],gas->SPRC[2][LS][MS][KS]);//collision volume
B=BB*geom->ICCELL[2][N]*calc->FNUM/AAA;
if(B>1.e00)
{
cout<<"THREE BODY PROBABILITY"<<B;
//for low density flows in which three-body collisions are very rare, it is advisable to consider recombinations in only a small
//fraction of collisions and to increase the probability by the inverse of this fraction. This message provides a warning if this
//factor has been set to an excessively large value
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF<B)
{
IREC=1;
calc->TRECOMB[gas->ISPRC[LS][MS]]=calc->TRECOMB[gas->ISPRC[LS][MS]]+1.e00;
//the collision now becomes a collision between these with L having the center of mass velocity
A=0.5e00*gas->SPM[1][LS][MS]*VRR ;//the relative energy of the recombining molecules
if(gas->ISPR[1][LS]>0)
A=A+molecs->PROT[L];
if(gas->MELE>1)
A=A+molecs->PELE[L];
if(gas->ISPV[LS]>0)
{
for(KVV=1;KVV<=gas->ISPV[LS];KVV++)
{
JI=molecs->IPVIB[KVV][L];
if(JI<0)
JI=-JI;
if(JI==99999)
JI=0;
A=A+(double)(JI)*BOLTZ*gas->SPVM[1][KVV][LS];
}
}
if(gas->ISPR[1][MS]>0)
A+=molecs->PROT[M];
if(gas->MELE>1)
A=A+molecs->PELE[M];
if(gas->ISPV[MS]>0)
{
for(KVV=1;KVV<=gas->ISPV[MS];KVV++)
{
JI=molecs->IPVIB[KVV][M];
if(JI<0)
JI=-JI;
if(JI==99999)
JI=0;
A=A+(double)(JI)*BOLTZ*gas->SPVM[1][KVV][MS];
}
}
gas->TREACL[2][LS]=gas->TREACL[2][LS]-1;
gas->TREACL[2][MS]=gas->TREACL[2][MS]-1;
LSI=LS;
MSI=MS;
LS=gas->ISPRC[LS][MS];
molecs->IPSP[L]=LS;
//any additional vibrational modes must be set to zero
IVM=gas->ISPV[LSI];
NMC=molecs->IPSP[L];
NVM=gas->ISPV[NMC];
if(NVM>IVM)
{
for(KV=IVM+1;KV<=NVM;KV++)
{
molecs->IPVIB[KV][L]=0;
}
}
if(gas->MELE>1)
molecs->PELE[L]=0.e00;
molecs->IPCELL[M]=-molecs->IPCELL[M]; //recombining molecule M marked for removal
M=KT; //third body molecule is set as molecule M
MS=KS;
gas->TREACG[2][LS]=gas->TREACG[2][LS]+1;
if(gas->ISPR[1][LS]>0)
{
molecs->PROT[L]=0.e00;
}
if(gas->MELE>1)
molecs->PELE[L]=0.e00;
if(gas->ISPV[LS]>0)
{
for(KVV=1;KVV<=gas->ISPV[LS];KVV++)
{
if(molecs->IPVIB[KVV][L]<0)
{
molecs->IPVIB[KVV][L]=-99999;
}
else
{
molecs->IPVIB[KVV][L]=0;
}
}
}
if(gas->ISPR[1][MS]>0)
{
molecs->PROT[M]=molecs->PROT[KT];
}
if(gas->MELE>1)
molecs->PELE[M]=molecs->PELE[KT];
if(gas->ISPV[MS]>0)
{
for(KVV=1;KVV<=gas->ISPV[MS];KVV++)
{
molecs->IPVIB[KVV][M]=molecs->IPVIB[KVV][KT];
}
}
ECTOT=A+gas->SPVM[4][1][LS]*BOLTZ ; //the energy added to this collision
for(KK=1;KK<=3;KK++)
{
molecs->PV[KK][L]=VCM[KK];
}
for(KK=1;KK<=3;KK++)
{
VRC[KK]=molecs->PV[KK][L]-molecs->PV[KK][M];
}
VRR=VRC[1]*VRC[1]+VRC[2]*VRC[2]+VRC[3]*VRC[3];
ECT=0.5e00*gas->SPM[1][LS][MS]*VRR+ECTOT;
//set the vibrational energy of the recombined molecule L to enforce detailed balance
IK=-1;
NK=-1;
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
//NTRY=0;
while(IK<0)
{
// NTRY+=1;
// if(NTRY>100)
// cout<<"NTRY VibEn"<<NTRY;
NK=NK+1;
BB=(output->VAR[8][NN]-gas->SPRT[1][LSI][MSI])*(gas->SPRP[2][LSI][MSI][NK]-gas->SPRP[1][LSI][MSI][NK])/(gas->SPRT[2][LSI][MSI]-gas->SPRT[1][LSI][MSI])+gas->SPRP[1][LSI][MSI][NK];
if(calc->RANF<BB)
IK=NK;
}
molecs->IPVIB[1][L]=IK;
ECT=ECT-(double)(IK)*BOLTZ*gas->SPVM[1][gas->ISPRK[LSI][MSI]][LS];
VRR=2.e00*ECT/gas->SPM[1][LS][MS];
VR=sqrt(VRR);
RML=gas->SPM[1][LS][MS]/gas->SP[5][MS];
RMM=gas->SPM[1][LS][MS]/gas->SP[5][LS];
for(KK=1;KK<=3;KK++)
{
VCM[KK]=RML*molecs->PV[KK][L]+RMM*molecs->PV[KK][M];
}
}
}
//consider exchange and chain reactions
if(gas->NSPEX[LS][MS]>0 && IREC==0 && IDISS==0)
{
//possible exchange reaction
//memset(gas->PSF,0.e00,sizeof(*gas->PSF));//gas->PSF=0.e00; //PSF(MMEX) PSF is the probability that this reaction will occur in this collision
for(int i=0;i<gas->MMEX+1;i++)
gas->PSF[i]=0.e00;
for(JJ=1;JJ<=gas->NSPEX[LS][MS];JJ++)
{
if(LS==gas->ISPEX[JJ][1][LS][MS])
{
K=L; KS=LS;JS=MS;
}
else
{
K=M; KS=MS; JS=LS;
}
//the pre-collision molecule that splits is K of species KS
if(gas->SPEX[3][JJ][LS][MS]<0.e00)
KV=gas->ISPEX[JJ][5][LS][MS];
if(gas->SPEX[3][JJ][LS][MS]>0.e00)
{
KV=gas->ISPEX[JJ][7][LS][MS];
}
JI=molecs->IPVIB[KV][K];
if(JI<0)
JI=-JI;
if(JI==99999)
JI=0;
ECC=0.5e00*gas->SPM[1][LS][MS]*VRR+(double)(JI)*BOLTZ*gas->SPVM[1][KV][KS];
if(gas->SPEX[3][JJ][KS][JS]>0.e00)
{
//reverse exothermic reaction
gas->PSF[JJ]=(gas->SPEX[1][JJ][KS][JS]*pow(output->VAR[8][NN]/273.e00,gas->SPEX[2][JJ][KS][JS]))*exp(-gas->SPEX[6][JJ][KS][JS]/(BOLTZ*output->VAR[8][NN]));
}
else
{
//forward endothermic reaction
MAXLEV=ECC/(BOLTZ*gas->SPVM[1][KV][KS]);
EA=fabs(gas->SPEX[3][JJ][KS][JS]); //temporarily just the heat of reaction;
if(ECC>EA)
{
//the collision energy must exceed the heat of reaction
EA=EA+gas->SPEX[6][JJ][KS][JS]; //the activation energy now includes the energy barrier
DEN=0.e00;
for(IAX=0;IAX<=MAXLEV;IAX++)
{
DEN=DEN+pow((1.e00-(double)(IAX)*BOLTZ*gas->SPVM[1][KV][KS]/ECC),(1.5e00-gas->SPM[3][KS][JS]));
}
gas->PSF[JJ]=(double)(gas->ISPEX[JJ][6][LS][MS])*pow((1.e00-EA/ECC),(1.5e00-gas->SPM[3][KS][JS]))/DEN;
}
}
}
if(gas->NSPEX[LS][MS]>1)
{
BB=0.e00;
for(JJ=1;JJ<=gas->NSPEX[LS][MS];JJ++)
{
BB=BB+gas->PSF[JJ];
}
//BB is the sum of the probabilities
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(BB>calc->RANF)
{
BB=0.e00;
IEX=0;
JJ=0;
//NTRY=0;
while(JJ<gas->NSPEX[LS][MS]&& IEX==0)
{
// NTRY=NTRY+1;
// if(NTRY>100)
// {
// cout<<"NTRY find IEX"<<NTRY;
// }
JJ+=1;
BB+=gas->PSF[JJ];
if(BB>calc->RANF)
IEX=JJ;
}
}
}
else
{
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
IEX=0;
if(gas->PSF[1]>calc->RANF)
IEX=1;
}
if(IEX>0)
{
//exchange or chain reaction occurs
JX=gas->NEX[IEX][LS][MS];
//cout<<"Reaction"<<JX;
gas->TNEX[JX]=gas->TNEX[JX]+1.e00;
//cout<<IEX<<L<<M<<LS<<MS;
molecs->IPSP[L]=gas->ISPEX[IEX][3][LS][MS]; //L is now the new molecule that splits
molecs->IPSP[M]=gas->ISPEX[IEX][4][LS][MS];
LSI=LS;
MSI=MS;
//any additional vibrational modes must be set to zero
IVM=gas->ISPV[LS];
NMC=molecs->IPSP[L];
NVM=gas->ISPV[NMC];
if(NVM>IVM)
{
for(KV=IVM+1;KV<=NVM;KV++)
{
molecs->IPVIB[KV][L]=0;
}
}
IVM=gas->ISPV[MS];
NMC=molecs->IPSP[M];
NVM=gas->ISPV[NMC];
if(NVM>IVM)
{
for(KV=IVM+1;KV<=NVM;KV++)
{
molecs->IPVIB[KV][M]=0;
}
}
//put all pre-collision energies into the relative translational energy and adjust for the reaction energy
ECT=0.5e00*gas->SPM[1][LS][MS]*VRR;
if(gas->ISPR[1][LS]>0)
ECT=ECT+molecs->PROT[L];
if(gas->MELE>1)
ECT=ECT+molecs->PELE[L];
if(gas->ISPV[LS]>0)
{
for(KV=1;KV<=gas->ISPV[LS];KV++)
{
JI=molecs->IPVIB[KV][L];
if(JI<0)
JI=-JI;
if(JI==99999)
JI=0;
ECT=ECT+(double)(JI)*BOLTZ*gas->SPVM[1][KV][LS];
}
}
if(gas->ISPR[1][MS]>0)
ECT=ECT+molecs->PROT[M];
if(gas->MELE>1)
ECT=ECT+molecs->PELE[M];
if(gas->ISPV[MS]>0)
{
for(KV=1;KV<=gas->ISPV[MS];KV++)
{
JI=molecs->IPVIB[KV][M];
if(JI<0)
JI=-JI;
if(JI==99999)
JI=0;
ECT=ECT+(double)(JI)*BOLTZ*gas->SPVM[1][KV][MS];
}
}
ECT=ECT+gas->SPEX[3][IEX][LS][MS];
if(ECT<0.0)
{
cout<<"-VE ECT "<<ECT<<endl;
cout<<"REACTION "<<JJ<<" BETWEEN "<<LS<<" "<<MS<<endl;
//dout
cin.get();
return ;
}
if(gas->SPEX[3][IEX][LS][MS]<0.e00)
{
gas->TREACL[3][LS]=gas->TREACL[3][LS]-1;
gas->TREACL[3][MS]=gas->TREACL[3][MS]-1;
LS=molecs->IPSP[L] ;
MS=molecs->IPSP[M] ;
gas->TREACG[3][LS]=gas->TREACG[3][LS]+1;
gas->TREACG[3][MS]=gas->TREACG[3][MS]+1;
}
else
{
gas->TREACL[4][LS]=gas->TREACL[4][LS]-1;
gas->TREACL[4][MS]=gas->TREACL[4][MS]-1;
LS=molecs->IPSP[L] ;
MS=molecs->IPSP[M] ;
gas->TREACG[4][LS]=gas->TREACG[4][LS]+1;
gas->TREACG[4][MS]=gas->TREACG[4][MS]+1;
}
RML=gas->SPM[1][LS][MS]/gas->SP[5][MS];
RMM=gas->SPM[1][LS][MS]/gas->SP[5][LS];
//calculate the new VRR to match ECT using the new molecular masses
VRR=2.e00*ECT/gas->SPM[1][LS][MS];
if(gas->ISPV[LS]>0)
{
for(KV=1;KV<=gas->ISPV[LS];KV++)
{
if(molecs->IPVIB[KV][L]<0)
{
molecs->IPVIB[KV][L]=-99999;
}
else
{
molecs->IPVIB[KV][L]=0;
}
}
}
if(gas->ISPR[1][LS]>0)
molecs->PROT[L]=0;
if(gas->MELE>1)
molecs->PELE[L]=0.e00;
if(gas->ISPV[MS]>0)
{
for(KV=1;KV<=gas->ISPV[MS];KV++)
{
if(molecs->IPVIB[KV][M]<0)
{
molecs->IPVIB[KV][M]=-99999;
}
else
{
molecs->IPVIB[KV][M]=0;
}
}
}
if(gas->ISPR[1][MS]>0)
molecs->PROT[M]=0;
if(gas->MELE>1)
molecs->PELE[M]=0.e00;
//set vibrational level of product molecule in exothermic reaction to enforce detailed balance
if(gas->SPEX[3][IEX][LSI][MSI]>0.e00)
{
//exothermic exchange or chain reaction
IK=-1; //becomes 0 when the level is chosen
NK=-1;
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
//NTRY=0;
while(IK<0)
{
// NTRY=NTRY+1;
// if(NTRY>100)
// {
// cout>>"NTRY VibProd"<<NTRY<<endl;
// }
NK=NK+1;
BB=(output->VAR[8][NN]-gas->SPEX[4][IEX][LSI][MSI])* (gas->SPREX[2][IEX][LSI][MSI][NK]-gas->SPREX[1][IEX][LSI][MSI][NK])/(gas->SPEX[5][IEX][LSI][MSI]-gas->SPEX[4][IEX][LSI][MSI])+gas->SPREX[1][IEX][LSI][MSI][NK];
if(calc->RANF<BB)
IK=NK;
}
if(gas->NSLEV[1][LS]>0)
{
IK+=gas->NSLEV[1][LS];
gas->NSLEV[1][LS]=0;
}
KV=gas->ISPEX[IEX][7][LSI][MSI];
molecs->IPVIB[KV][L]=IK;
EVIB=(double)(IK)*BOLTZ*gas->SPVM[1][KV][LS];
ECT=ECT-EVIB;
if(ECT<0.e00)
{
//NTRY=0;
while(ECT<0.e00)
{
//NTRY+=1;
// if(NTRY>100)
// cout<<"NTRY ECT<0"<<NTRY<<endl;
molecs->IPVIB[KV][L]=molecs->IPVIB[KV][L]-1;
gas->NSLEV[1][LS]+=1;
ECT=ECT+BOLTZ*gas->SPVM[1][KV][LS];
}
}
}
else
{
//for endothermic reaction, select vibration from vib. dist. at macroscopic temperature
//normal L-B selection would be from the excessively low energy after the endo. reaction
KV=gas->ISPEX[IEX][5][LS][MS];
//dout
SVIB(LS,output->VAR[8][NN],IK,KV);
if(gas->NSLEV[2][LS]>0)
{
IK=IK+gas->NSLEV[2][LS];
gas->NSLEV[2][LS]=0;
}
molecs->IPVIB[KV][L]=IK;
EVIB=(double)(IK)*BOLTZ*gas->SPVM[1][KV][LS];
ECT=ECT-EVIB;
if(ECT<0.e00)
{
//NTRY=0;
while(ECT<0.e00)
{
//NTRY+=1;
molecs->IPVIB[KV][L]-=1;
gas->NSLEV[2][LS]+=1;
ECT=ECT+BOLTZ*gas->SPVM[1][KV][LS];
// if(NTRY>100)
// {
//cout<<"NTRY ECT<0#2"<<NTRY<<endl;
// molecs->IPVIB[KV][L]=0;
// ECT+=EVIB;
// gas->NSLEV[2][LS]=0;
// }
}
}
}
//set rotational energy of molecule L to equilibrium at the macroscopic temperature
SROT(LS,output->VAR[8][NN],molecs->PROT[L]);
if(gas->SLER[LS]>1.e-21)
{
molecs->PROT[L]+=gas->SLER[LS];
gas->SLER[LS]=1.e-21;
}
ECT-=molecs->PROT[L];
ABA=molecs->PROT[L];
if(ECT<0.e00)
{
//NTRY=0;
while(ECT<0.e00)
{
//NTRY+=1;
BB=0.5e00*molecs->PROT[L];
gas->SLER[LS]+=BB;
molecs->PROT[L]=BB;
ECT+=BB;
// if(NTRY>100)
// {
// cout<<"NTRY ECT<0#3"<<NTRY<<L<<endl;
// ECT+=ABA;
// molecs->PROT[L]=0;
// gas->SLER[LS]=1.e-21;
// }
}
}
//calculate the new VRR to match ECT using the new molecular masses
VRR=2.e00*ECT/gas->SPM[1][LS][MS];
}
}
}
//end of reactions other than the deferred dissociation action in the DISSOCIATION subroutine
if(IREC==0 && IDISS==0)
{
//recombined redistribution already made and there is a separate subroutine for dissociation
//Larsen-Borgnakke serial redistribution
ECT=0.5e00*gas->SPM[1][LS][MS]*VRR;
for(NSP=1;NSP<=2;NSP++)
{
if(NSP==1)
{
K=L;KS=LS;JS=MS;
}
else
{
K=M; KS=MS; JS=LS;
}
//now electronic energy for this molecule
if(gas->MELE>1)
{
B=1.e00/gas->QELC[3][1][KS];
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(B>calc->RANF)
{
NPS=0;
ECC=ECT+molecs->PELE[K];
if(gas->NELL[KS]==1){
NPS=gas->QELC[1][1][KS]; //number of possible states is at least the degeneracy of the ground state
}
if(gas->NELL[KS]>1)
{
for(NEL=1;NEL<=gas->NELL[KS];NEL++)
{
if(ECC>BOLTZ*gas->QELC[2][NEL][KS])
NPS=NPS+gas->QELC[1][NEL][KS];
}
II=0;
//NTRY=0;
while(II==0)
{
//NTRY+=1;
// if(NTRY>100)
// cout<<"NTRY ElecEn"<<NTRY<<endl;
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
NSTATE=ceil(calc->RANF*NPS);//random state, now determine the energy level
NAS=0;
NLEVEL=-1;
for(NEL=1;NEL<=gas->NELL[KS];NEL++)
{
NAS= NAS+gas->QELC[1][NEL][KS];
if(NSTATE<=NAS && NLEVEL<0)
NLEVEL=NEL;
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if((1.e00/(B*gas->QELC[3][NLEVEL][KS]))<calc->RANF)
{
II=1;
}
else
{
if(ECC>BOLTZ*gas->QELC[2][NLEVEL][KS])
{
PROB=pow(1.e00-BOLTZ*gas->QELC[2][NLEVEL][KS]/ECC,(1.5e00-gas->SPM[3][KS][JS]));
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(PROB>calc->RANF)
{
II=1;
molecs->PELE[K]=BOLTZ*gas->QELC[2][NLEVEL][KS];
}
}
}
}
ECT=ECC-molecs->PELE[K];
}
}
}
//now the vibrational energy for this molecule
if(gas->MMVM>0 && IEX==0)
{
if(gas->ISPV[KS]>0)
{
for(KV=1;KV<=gas->ISPV[KS];KV++)
{
if(molecs->IPVIB[KV][K]>=0 && IDISS==0) //do not redistribute to a dissociating molecule marked for removal
{
EVIB=(double)(molecs->IPVIB[KV][K])*BOLTZ*gas->SPVM[1][KV][KS];
ECC=ECT+EVIB;
MAXLEV=ECC/(BOLTZ*gas->SPVM[1][KV][KS]);
if(gas->SPVM[3][KV][KS]>0.0)
{
B=gas->SPVM[4][KV][KS]/gas->SPVM[3][KV][KS];
A=gas->SPVM[4][KV][KS]/output->VAR[8][NN];
ZV = pow(A,gas->SPM[3][KS][JS])*pow((gas->SPVM[2][KV][KS]*pow(B,-gas->SPM[3][KS][JS])),((pow(A,0.3333333e00)-1.e00)/(pow(B,0.33333e00)-1.e00)));
}
else
ZV=gas->SPVM[2][KV][KS];
// RANDOM_NUMBER(RANF) //dout
calc->RANF=((double)rand()/(double)RAND_MAX);
if(1.e00/ZV>calc->RANF ||IREC==1)
{
II=0;
NSTEP=0;
while(II==0 && NSTEP<100000)
{
NSTEP+=1;
if(NSTEP>99000)
{
cout<<NSTEP<<" "<<ECC<<" "<<MAXLEV<<endl;
//dout
return ;
}
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
IV=calc->RANF*(MAXLEV+0.99999999e00);
molecs->IPVIB[KV][K]=IV;
EVIB=(double)(IV)*BOLTZ*gas->SPVM[1][KV][KS];
if(EVIB<ECC)
{
PROB=pow(1.e00-EVIB/ECC,1.5e00-gas->SPM[3][KS][JS]);
//PROB is the probability ratio of eqn (3.28)
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(PROB>calc->RANF)
II=1;
}
}
ECT=ECC-EVIB;
}
}
}
}
}
//now rotation of this molecule
//dout
if(gas->ISPR[1][KS] > 0)
{
if(gas->ISPR[2][KS]==0 && gas->ISPR[2][JS]==0)
{
B=1.e00/gas->SPM[7][KS][JS];
}
else
B=1.e00/(gas->SPR[1][KS]+gas->SPR[2][KS]*output->VAR[8][NN]+gas->SPR[3][KS]*pow(output->VAR[8][NN],2));
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(B>calc->RANF|| IREC==1)
{
ECC=ECT+molecs->PROT[K];
if(gas->ISPR[1][KS]==2)
{
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
ERM=1.e00-pow(calc->RANF,(1.e00/(2.5e00-gas->SPM[3][KS][JS])));//eqn(5.46)
}
else
LBS(0.5e00*gas->ISPR[1][KS]-1.e00,1.5e00-gas->SPM[3][KS][JS],ERM);
molecs->PROT[K]=ERM*ECC;
ECT=ECC-molecs->PROT[K];
}
}
}
//adjust VR for the change in energy
VR=sqrt(2.e00*ECT/gas->SPM[1][LS][MS]);
}//end of L-B redistribution
if(fabs(gas->SPM[8][LS][MS]-1.0)<0.001)
{
//use the VHS logic
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
B=2.e00*calc->RANF-1.e00;
//B is the cosine of a random elevation angle
A=sqrt(1.e00-B*B);
VRCP[1]=B*VR;
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
C=2.e00*PI*calc->RANF;
//C is a random azimuth angle;
VRCP[2]=A*(double)cos(C)*VR;
VRCP[3]=A*(double)sin(C)*VR;
}
else
{
//use the VSS logic
//the VRCP terms do not allow properly for the change in VR - see new book !STILL TO BE FIXED
VRA=VR/VRI;
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
B=2.e00*pow(calc->RANF,gas->SP[4][1])-1.e00;
// B is the cosine of the deflection angle for the VSS model
A=sqrt(1.e00-B*B);
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
C=2.e00*PI*calc->RANF;
OC=(double)cos(C);
SD=(double)sin(C);
D=sqrt(pow(VRC[2],2)+pow(VRC[3],2));
VRCP[1]=(B*VRC[1]+A*SD*D)*VRA;
VRCP[2]=(B*VRC[2]+A*(VRI*VRC[3]*OC-VRC[1]*VRC[2]*SD)/D)*VRA;
VRCP[3]=(B*VRC[3]-A*(VRI*VRC[2]*OC+VRC[1]*VRC[3]*SD)/D)*VRA;
//the post-collision rel. velocity components are based on eqn (3.18)
}
for(KK=1;KK<=3;KK++)
{
molecs->PV[KK][L]=VCM[KK]+RMM*VRCP[KK];
molecs->PV[KK][M]=VCM[KK]-RMM*VRCP[KK];
}
molecs->IPCP[L]=M;
molecs->IPCP[M]=L;
//call energy(0,E2)
// ! IF (Dfabs(E2-E1) > 1.D-14) read(*,*)
}////collision occurrence
}
}//separate simplegas / mixture coding
}
}
}
}
}//remove any recombined atoms
duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC;
std::cout<<"printf: "<< duration <<'\n';
for(N=1;N<=molecs->NM;N++)
{
if(molecs->IPCELL[N]<0)
REMOVE_MOL(N);
}
return;
}
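//
//--A minimal sketch (illustration only, not called by the solver) of the Larsen-Borgnakke
//--acceptance probability used above, PROB=(1-EVIB/ECC)^(1.5-omega) from eqn (3.28);
//--the function name LB_ACCEPT_PROB is hypothetical.
static double LB_ACCEPT_PROB(double EVIB, double ECC, double OMEGA)
{
    //EVIB candidate vibrational energy, ECC available collision energy
    //OMEGA mean viscosity-temperature exponent of the collision pair
    if (EVIB >= ECC)
        return 0.e00;
    return pow(1.e00-EVIB/ECC, 1.5e00-OMEGA);
}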
|
c2dc845dd1ec2b4192a6215edee153451774bb10.cu
|
//
// main.cpp
// DS
//
// Created by Shubham Gupta on 31/03/17.
// Copyright © 2017 Shubham Gupta. All rights reserved.
// Modified by Utkarsh Aashu Mishra on 5/02/2014
// Copyright © 2018 Utkarsh Aashu Mishra. All rights reserved.
#include <stdio.h>
#include <iostream>
#include <cmath>
#include <string.h>
#include <algorithm>
#include <fstream>
#include<sstream>
#include <iomanip>
#include <ctime>
using namespace std;
#define PI 3.1415926535897932
#define DPI 6.283185307179586
#define SPI 1.772453850905516
#define BOLTZ 1.380658e-23
#define AVOG 6.022169e26
void ALLOCATE_GAS();
void HARD_SPHERE();
void ARGON();
void IDEAL_NITROGEN();
void REAL_OXYGEN();
void IDEAL_AIR();
void REAL_AIR();
void HELIUM_ARGON_XENON();
void OXYGEN_HYDROGEN();
void INITIALISE_SAMPLES();
void DERIVED_GAS_DATA();
void SET_INITIAL_STATE_1D();
void MOLECULES_ENTER_1D();
void FIND_CELL_1D(double &,int &,int &);
void FIND_CELL_MB_1D(double &,int &,int &,double &);
void RVELC(double &,double &,double &);
void SROT(int &,double &,double &);
void SVIB(int &,double &,int &, int&);
void SELE(int &,double &,double &);
void CQAX(double&,double &,double&);
void LBS(double,double,double&);
void REFLECT_1D(int&,int,double&);
void RBC(double &, double &, double & , double &, double &,double &);
void AIFX(double & ,double &, double & , double &, double &, double&, double &, double&);
void REMOVE_MOL(int &);
void INDEX_MOLS();
void SAMPLE_FLOW();
void ADAPT_CELLS_1D();
void EXTEND_MNM(double);
void DISSOCIATION();
void ENERGY(int ,double &);
void COLLISIONS();
void SETXT();
void READ_RESTART();
void WRITE_RESTART();
void READ_DATA();
void OUTPUT_RESULTS();
void MOLECULES_MOVE_1D();
class Managed
{
public:
void *operator new(size_t len) {
void *ptr;
cudaMallocManaged(&ptr, len);
cudaDeviceSynchronize();
return ptr;
}
void operator delete(void *ptr) {
cudaDeviceSynchronize();
cudaFree(ptr);
}
};
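//
//--A minimal sketch (not part of the original solver) of how a type derived from Managed is
//--placed in CUDA unified memory by the overloaded operators above; the type DemoManaged and
//--the function DEMO_MANAGED_ALLOC are hypothetical and exist only for illustration.
struct DemoManaged : public Managed
{
    int value;
};
static void DEMO_MANAGED_ALLOC()
{
    DemoManaged *d = new DemoManaged; //operator new above routes the allocation through cudaMallocManaged
    d->value = 1;                     //the object is addressable from both host and device code
    delete d;                         //operator delete synchronizes and releases with cudaFree
}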
class CALC : public Managed
{
public:
//declares the variables associated with the calculation
int NVER,MVER,IMEG,NREL,MOLSC,ISF,ISAD,ISECS,IGS,IREM,NNC,IMTS,ERROR,NLINE,ICLASS,NCLASS,NMCC,NMI,NMP,ICN;
double FTIME,TLIM,FNUM,DTM,TREF,TSAMP,TOUT,SAMPRAT,OUTRAT,RANF,TOTCOLI,TOTMOVI,TENERGY,DTSAMP,DTOUT,TPOUT,FRACSAM,TOTMOV,TOTCOL,ENTMASS,ENTREM,CPDTM,TPDTM,TNORM,FNUMF;
double *VNMAX,*TDISS,*TRECOMB,*ALOSS,*EME,*AJM;
double **TCOL;
void d_allocate(int x, double*&arr){
cudaMallocManaged(&arr, x*sizeof(double));
}
void d_allocate(int x, int y, double**&arr){
cudaMallocManaged(&arr, x*sizeof(double));
for(int i =0; i< x; ++i)
cudaMallocManaged(&arr[i], y*sizeof(double));
}
//NVER.MVER.NREL the version number
//IMEG the initial number of megabytes to be used by the program
//MOLSC the target number of molecules per sampling cell
//FTIME the flow time
//TLIM the time at which the calculation stops
//FNUM the number of real molecules represented by each simulated molecule
//CPDTM the maximum number of collisions per time step (standard 0.2)
//TPDTM the maximum number of sampling cell transit times of the flow per time step
//TOTMOV total molecule moves
//TOTCOL total collisions
//TDISS(L) dissociations of species L since sample reset
//TRECOMB(L) recombinations of species L since sample reset
//ENTMASS the current entry mass of which a fraction FREM is to be removed
//ENTREM the remainder (always negative) after molecule removal
//VNMAX(L) the maximum normal velocity component of species L
//TCOL species dependent collision counter
//ISF 0,1 for steady, unsteady flow sampling
//ISAD 0,1 to not adapt, to adapt cells automatically at the start of each output interval in unsteady sampling
//ISECS 0,1 for no secondary stream, a secondary stream that applies for positive values of x
//IREM data item to set type of molecule removal
//NNC 0 for normal collisions, 1 for nearest neighbor collisions
//IMTS 0 for uniform move time steps, 1 for time steps that vary over the cells, 2 for fixed time steps
//IGS 0 for initial gas, 1 for stream(s) or reference gas
//ICLASS class of flow
//NCLASS the dimension of PX for the class of flow
//NMCC desired number of molecules in a collision cell
//NMI the initial number of molecules
//TNORM normalizing time (may vary e.g. mean collision time , or a transit time)
//ALOSS(L) number of molecules of species L lost in the move routine
//EME(L) number of species L that enter the front boundary
//AJM(L) the adjustment number to allow for negative downstream entry numbers
//NMP the number of molecules at the start of the move routine
//ICN 0 if molecules with ITYPE(2)=4 are not kept constant, 1 to keep molecule number constant
//FNUMF adjustment factor that is applied to automatically generated value
};
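//
//--A minimal sketch (illustration only) of how CPDTM and TPDTM above bound the move time step:
//--DTM is limited to CPDTM times the local mean collision time and to TPDTM times the sampling
//--cell transit time; the function name SKETCH_CELL_DTM is hypothetical and is not called by the solver.
static double SKETCH_CELL_DTM(double DTM, double COLTIME, double CELLWIDTH, double SPEED, double CPDTM, double TPDTM)
{
    double A = COLTIME*CPDTM;                 //limit from the local collision rate
    double B = (CELLWIDTH/fabs(SPEED))*TPDTM; //limit from the sampling cell transit time
    if (A < DTM) DTM = A;
    if (B < DTM) DTM = B;
    return DTM;
}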
class MOLECS : public Managed
{
//declares the variables associated with the molecules
public:
int *IPCELL,*IPSP,*ICREF,*IPCP;
int **IPVIB;
void i_allocate(int x, int *&arr){
cudaMallocManaged(&arr, x*sizeof(int));
}
void i_allocate(int x, int y, int **&arr){
    cudaMallocManaged(&arr, x*sizeof(int*)); //--the row array holds x pointers, not x ints
    for(int i =0; i< x; ++i)
        cudaMallocManaged(&arr[i], y*sizeof(int));
}
double **PX,**PV;
double *PTIM,*PROT,*PELE;
void d_allocate(int x, double *&arr){
cudaMallocManaged(&arr, x*sizeof(double));
}
void d_allocate(int x, int y, double **&arr){
cudaMallocManaged(&arr, x*sizeof(double));
for(int i =0; i< x; ++i){
try{
cudaMallocManaged(&arr[i], y*sizeof(double));
}
catch (std::bad_alloc& ba){
std::cerr << "bad_alloc caught: " << ba.what() << '\n';
}
}
}
int NM,MNM;
//PX(1,2 or 3,N) x,y,z position coordinates of molecule N
//PTIM(N) molecule time
//IPSP(N) the molecular species
//IPCELL(N) the collision cell number
//ICREF the cross-reference array (molecule numbers in order of collision cells)
//IPCP(N) the code number of the last collision partner of molecule
//PV(1-3,N) u,v,w velocity components
//PROT(N) rotational energy
//IPVIB(K,N) level of vibrational mode K of molecule N
//PELE(N) electronic energy
//NM number of molecules
//MNM the maximum number of molecules
};
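//
//--A minimal sketch (illustration only) of how the d_allocate/i_allocate helpers above are intended
//--to build the per-molecule managed arrays; the sizes mirror the 1-based indexing used elsewhere but
//--are arbitrary here, the allocations are not freed, and the name DEMO_MOLECS_ALLOC is hypothetical.
static void DEMO_MOLECS_ALLOC()
{
    MOLECS demo;
    demo.MNM = 8;                            //an arbitrary maximum molecule count for the sketch
    demo.d_allocate(4, demo.MNM+1, demo.PV); //PV(1-3,N): rows 1-3 hold u,v,w (index 0 unused)
    demo.i_allocate(demo.MNM+1, demo.IPSP);  //IPSP(N): species code of molecule N
    demo.PV[1][1] = 0.e00;
    demo.IPSP[1] = 1;
}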
class GAS : public Managed
{
//declares the variables associated with the molecular species and the stream definition
public:
double RMAS,CXSS,RGFS,VMPM,FDEN,FPR,FMA,FPM,CTM;
double FND[3],FTMP[3],FVTMP[3],VFX[3],VFY[3],TSURF[3],FSPEC[3],VSURF[3];
double *ERS,*CR,*TNEX,*PSF,*SLER,*FP;
double **FSP,**SP,**SPR,**SPV,**VMP;
double ***SPM,***SPVM,***ENTR,***QELC,***SPRT;
double ****SPEX,****SPRC,****SPRP;
double *****SPREX;
void d_allocate(int x, double *&arr){
cudaMallocManaged(&arr, x*sizeof(double));
}
void d_allocate(int x, int y, double **&arr){
cudaMallocManaged(&arr, x*sizeof(double));
for(int i =0; i< x; ++i)
cudaMallocManaged(&arr[i], y*sizeof(double));
}
void d_allocate(int x, int y, int z, double***&arr){
cudaMallocManaged(&arr, x*sizeof(double));
for (int i = 0; i < x; ++i)
{
cudaMallocManaged(&arr[i], y*sizeof(double));
for (int j = 0; j < y; ++j)
cudaMallocManaged(&arr[i][j], z*sizeof(double));
}
}
void d_allocate(int x, int y, int z, int w, double ****&arr){
cudaMallocManaged(&arr, x*sizeof(double));
for (int i = 0; i < x; ++i)
{
cudaMallocManaged(&arr[i], y*sizeof(double));
for (int j = 0; j < y; ++j)
{
cudaMallocManaged(&arr[i][j], z*sizeof(double));
for(int k=0; k<z; ++k)
cudaMallocManaged(&arr[i][j][k], w*sizeof(double));
}
}
}
void d_allocate(int x, int y, int z, int w, int v, double*****&arr){
cudaMallocManaged(&arr, x*sizeof(double));
for (int i = 0; i < x; ++i)
{
cudaMallocManaged(&arr[i], y*sizeof(double));
for (int j = 0; j < y; ++j)
{
cudaMallocManaged(&arr[i][j], z*sizeof(double));
for(int k=0; k<z; ++k)
{
cudaMallocManaged(&arr[i][j][k], w*sizeof(double));
for(int l=0; l<w; ++l)
cudaMallocManaged(&arr[i][j][k][l], v*sizeof(double));
}
}
}
}
int MSP,MMVM,MMRM,MNSR,IGAS,MMEX,MEX,MELE,MVIBL;
int *ISP,*ISPV,*NELL;
int **ISPR,**LIS,**LRS,**ISRCD,**ISPRC,**ISPRK,**TREACG,**TREACL,**NSPEX,**NSLEV;
int ***ISPVM,***NEX;
int ****ISPEX;
void i_allocate(int x, int *&arr){
cudaMallocManaged(&arr, x*sizeof(int));
}
void i_allocate(int x, int y, int **&arr){
    cudaMallocManaged(&arr, x*sizeof(int*)); //--the row array holds x pointers, not x ints
    for(int i =0; i< x; ++i)
        cudaMallocManaged(&arr[i], y*sizeof(int));
}
void i_allocate(int x, int y, int z, int ***&arr){
    cudaMallocManaged(&arr, x*sizeof(int**)); //--pointer levels must be sized as pointers
    for (int i = 0; i < x; ++i)
    {
        cudaMallocManaged(&arr[i], y*sizeof(int*));
        for (int j = 0; j < y; ++j)
            cudaMallocManaged(&arr[i][j], z*sizeof(int));
    }
}
void i_allocate(int x, int y, int z, int w, int ****&arr){
    cudaMallocManaged(&arr, x*sizeof(int***)); //--pointer levels must be sized as pointers
    for (int i = 0; i < x; ++i)
    {
        cudaMallocManaged(&arr[i], y*sizeof(int**));
        for (int j = 0; j < y; ++j)
        {
            cudaMallocManaged(&arr[i][j], z*sizeof(int*));
            for(int k=0; k<z; ++k)
                cudaMallocManaged(&arr[i][j][k], w*sizeof(int));
        }
    }
}
//MSP the number of molecular species
//MMVM the maximum number of vibrational modes of any species
//MEX number of exchange or chain reactions
//MELE the maximum number of electronic states of any molecule
//MVIBL the maximum number of vibrational levels for detailed balance lists
//MMEX the maximum number of exchange reactions involving the same precollision pair of molecules
//MMRM 0 if gas is completely monatomic, 1 if some species have rotation
//MNSR the number of surface reactions
//SP(1,L) the reference diameter of species L
//SP(2,L) the reference temperature of species L
//SP(3,L) the viscosity-temperature power law of species L
//SP(4,L) the reciprocal of the VSS scattering parameter
//SP(5,L) molecular mass of species L
//SP(6,L) the heat of formation at 273 K.
//ISPR(1,L) number of rotational degrees of freedom of species L
//ISPR(2,L) 0,1 for constant, polynomial rotational relaxation collision number
//SPR(1,L) constant rotational relaxation collision number of species L
// or the constant in a second order polynomial in temperature
//SPR(2,L) the coefficient of temperature in the polynomial
//SPR(3,L) the coefficient of temperature squared in the polynomial
//SPM(1,L,M) the reduced mass for species L,M
//SPM(2,L,M) the reference collision cross-section for species L,M
//SPM(3,L,M) the mean value of the viscosity-temperature power law
//SPM(4,L,M) the reference diameter for L,M collisions
//SPM(5,L,M) the reference temperature for species L,M
//SPM(6,L,M) reciprocal of the gamma function of (5/2-w) for species L,M
//SPM(7,L,M) rotational relaxation collision number for species L,M, or const in polynomial
//SPM(8,L,M) reciprocal of VSS scattering parameter
//ISPV(L) the number of vibrational modes
//SPVM(1,K,L) the characteristic vibrational temperature
//SPVM(2,K,L) constant Zv, or reference Zv for mode K
//SPVM(3,K,L) -1. for constant Zv, or reference temperature
//SPVM(4,K,L) the characteristic dissociation temperature
//SPVM(5,K,L) the arbitrary rate reduction factor
//ISPVM(1,K,L) the species code of the first dissociation product
//ISPVM(2,K,L) the species code of the second dissociation product
//NELL(L) the number of electronic levels of species L
//QELC(N,M,L) for up to M levels of form g*exp(-a/T) in the electronic partition function for species L
// N=1 for the degeneracy g
// N=2 for the coefficient a
// N=3 for the ratio of the excitation cross-section to the elastic cross-section
//ISPRC(L,M) the species of the recombined molecule from species L and M
//ISPRK(L,M) the applicable vibrational mode of this species
//SPRC(1,L,M,K) the constant a in the ternary collision volume
//SPRC(2,L,M,K) the temperature exponent b in the ternary collision volume
//SPRT(1,L,M) lower temperature value for SPRP
//SPRT(2,L,M) higher temperature value for SPRP
//SPRP(1,L,M,K) the cumulative dissociation distribution to level K for products L and M at the lower temperature
//SPRP(2,L,M,K) ditto at higher temperature, for application to post-recombination molecule//
//NSPEX(L,M) the number of exchange reactios with L,M as the pre-collision species
//in the following variables, J is the reaction number (1 to NSPEX(L,M))
//ISPEX(J,1,L,M) the species that splits in an exchange reaction
//ISPEX(J,2,L,M) the other pre-reaction species (all ISPEX are set to 0 if no exchange reaction)
//ISPEX(J,3,L,M) the post-reaction molecule that splits in the opposite reaction
//ISPEX(J,4,L,M) the other post-reaction species
//ISPEX(J,5,L,M) the vibrational mode of the molecule that splits
//ISPEX(J,6,L,M) degeneracy of this reaction
//ISPEX(J,7,L,M) the vibrational mode of the molecule that splits
//SPEX(1,J,L,M) the constant a in the reaction probability for the reverse reaction
//SPEX(2,J,L,M) the temperature exponent b in the reaction probability (reverse reaction only)
//SPEX(3,J,L,M) for the heat of reaction
//SPEX(4,J,L,M) the lower temperature for SPREX
//SPEX(5,J,L,M) the higher temperature for SPREX
//SPEX(6,J,L,M) the energy barrier
//SPREX(1,J,L,M,K) at lower temperature, the Jth reverse exchange reaction of L,M cumulative level K vib. dist of post-reaction molecule
//SPREX(2,J,L,M,K) ditto at higher temperature
//TNEX(N) total number of exchange reaction N
//NEX(N,L,M) the code number of the Nth exchange or chain reaction in L,M collisions
//RMAS reduced mass for single species case
//CXSS reference cross-section for single species case
//RGFS reciprocal of gamma function for single species case
//for the following, J=1 for the reference gas and/or the minimum x boundary, J=2 for the secondary sream at maximum x boundary
//FND(J) stream or reference gas number density
//FTMP(J) stream temperature
//FVTMP(J) the vibrational and any electronic temperature in the freestream
//VFX(J) the x velocity components of the stream
//VFY(J) the y velocity component in the stream
//FSP(N,J)) fraction of species N in the stream
//FMA stream Mach number
//VMP(N,J) most probable molecular velocity of species N at FTMP(J)
//VMPM the maximum value of VMP in stream 1
//ENTR(M,L,K) entry/removal information for species L at K=1 for 1, K=2 for XB(2)
// M=1 number per unit time
// M=2 remainder
// M=3 speed ratio
// M=4 first constant
// M=5 second constant
// M=6 the maximum normal velocity component in the removal zone (> XREM)
//LIS(1,N) the species code of the first incident molecule
//LIS(2,N) the species code of the second incident molecule (0 if none)
//LRS(1,N) the species code of the first reflected molecule
//LRS(2,N) the species code of the second reflected molecule (0 if none)
//LRS(3,N) the species code of the third reflected molecule (0 if none)
//LRS(4,N) the species code of the fourth reflected molecule (0 if none)
//LRS(5,N) the species code of the fifth reflected molecule (0 if none)
//LRS(6,N) the species code of the sixth reflected molecule (0 if none)
//ERS(N) the energy of the reaction (+ve for recombination, -ve for dissociation)
//NSRSP(L) number of surface reactions that involve species L as incident molecule
//ISRCD(N,L) code number of Nth surface reaction with species L as incident molecule
//CTM mean collision time in stream
//FPM mean free path in stream
//FDEN stream 1 density
//FPR stream 1 pressure
//FMA stream 1 Mach number
//RMAS reduced mass for single species case
//CXSS reference cross-section for single species case
//RGFS reciprocal of gamma function for single species case
//CR(L) collision rate of species L
//FP(L) mean free path of species L
//TREACG(N,L) the total number of species L gained from reaction type N=1 for dissociation, 2 for recombination, 3 for forward exchange, 4 for reverse exchange
//TREACL(N,L) the total number of species L lost from reaction type N=1 for dissociation, 2 for recombination, 3 for forward exchange, 4 for reverse exchange
//NSLEV(2,L) 1 exo, 2 endo: vibrational levels to be made up for species L in detailed balance enforcement after reaction
//SLER(L) rotational energy to be made up for species L in detailed balance enforcement after exothermic reaction
};
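//
//--A minimal sketch (illustration only) of the VHS product of cross-section and relative speed
//--built from the SPM data documented above, CVR=cr*SPM(2)*(2*BOLTZ*SPM(5)/(SPM(1)*cr*cr))^(SPM(3)-0.5)*SPM(6);
//--the function name SKETCH_VHS_CVR is hypothetical and the solver computes this inline.
static double SKETCH_VHS_CVR(double CR, double REFCS, double REFTEMP, double REDMASS, double OMEGA, double RGAM)
{
    //CR relative speed, REFCS reference cross-section, REFTEMP reference temperature
    //REDMASS reduced mass, OMEGA viscosity-temperature exponent, RGAM reciprocal gamma function value
    return CR*REFCS*pow(2.e00*BOLTZ*REFTEMP/(REDMASS*CR*CR), OMEGA-0.5e00)*RGAM;
}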
class OUTPUT : public Managed
{
public:
//declares the variables associated with the sampling and output
int NSAMP,NMISAMP,NOUT,NDISSOC,NRECOMB,NTSAMP;
//int NDISSL[201];
int *NDISSL;
OUTPUT(){
cudaMallocManaged(&NDISSL,201*sizeof(int));
};
double TISAMP,XVELS,YVELS,AVDTM;
double *COLLS,*WCOLLS,*CLSEP,*SREAC,*STEMP,*TRANSTEMP,*ROTTEMP,*VIBTEMP,*ELTEMP;
double **VAR,**VARS,**CSSS,**SUMVIB;
double ***CS,***VARSP,***VIBFRAC;
double ****CSS;
void d_allocate(int x, double *&arr){
cudaMallocManaged(&arr, x*sizeof(double));
}
void d_allocate(int x, int y, double **&arr){
cudaMallocManaged(&arr, x*sizeof(double));
for(int i =0; i< x; ++i)
cudaMallocManaged(&arr[i], y*sizeof(double));
}
void d_allocate(int x, int y, int z, double ***&arr){
cudaMallocManaged(&arr, x*sizeof(double));
for (int i = 0; i < x; ++i)
{
cudaMallocManaged(&arr[i], y*sizeof(double));
for (int j = 0; j < y; ++j)
cudaMallocManaged(&arr[i][j], z*sizeof(double));
}
}
void d_allocate(int x, int y, int z, int w, double ****&arr){
cudaMallocManaged(&arr, x*sizeof(double));
for (int i = 0; i < x; ++i)
{
cudaMallocManaged(&arr[i], y*sizeof(double));
for (int j = 0; j < y; ++j)
{
cudaMallocManaged(&arr[i][j], z*sizeof(double));
for(int k=0; k<z; ++k)
cudaMallocManaged(&arr[i][j][k], w*sizeof(double));
}
}
}
//NSAMP the number of samples
//TISAMP the time at which the sampling was last reset
//MNISAMP the number of molecules at the last reset
//AVDTM the average value of DTM in the cells
//NOUT the number of output intervals
//COLLS(N) total number of collisions in sampling cell N
//WCOLLS(N) total weighted collisins in N
//CLSEP(N) sum of collision pair separation in cell N
//CS(0,N,L) sampled number of species L in cell N
//CS(1,N,L) sampled weighted number of species L in cell N
//--all the following CS are weighted sums
//CS(2,N,L), CS(3,N,L), CS(4,N,L) sampled sum of u, v, w
//CS(5,N,L), CS(6,N,L), CS(7,N,L) sampled sum of u*u, v*v, w*w
//CS(8,N,L) sampled sum of rotational energy of species L in cell N
//CS(9,N,L) sampled sum of electronic energy of species L in cell N
//CS(9+K,N,L) sampled sum of vibrational level of species L in cell N
// K is the mode
//
//in CSS, M=1 for incident molecules and M=2 for reflected molecules
//J=1 for surface at x=XB(1), 2 for surface at x=XB(2)
//
//CSS(0,J,L,M) number sum of molecules of species L
//CSS(1,J,L,M) weighted number sum of molecules of species L
//--all the following CSS are weighted
//CSS(2,J,L,M) normal momentum sum to surface
//CSS(3,J,L,M) y momentum sum to surface
//CSS(4,J,L,M) z momentum sum to surface
//CSS(5,J,L,M) translational energy sum to surface
//CSS(6,J,L,M) rotational energy sum to surface
//CSS(7,J,L,M) vibrational energy sum to the surface
//CSS(8,J,L,M) electronic energy sum to the surface
//
//CSSS(1,J) weighted sum (over incident AND reflected molecules) of 1/normal vel. component
//--all the following CSSS are weighted
//CSSS(2,J) similar sum of molecular mass / normal vel. component
//CSSS(3,J) similar sum of molecular mass * parallel vel. component / normal vel. component
//CSSS(4,J) similar sum of molecular mass * speed squared / normal vel. component
//CSSS(5,J) similar sum of rotational energy / normal vel. component
//CSSS(6,J) similar sum of rotational degrees of freedom /normal velocity component
//
//SREAC(N) the number of type N surface reactions
//
//VAR(M,N) the flowfield properties in cell N
//M=1 the x coordinate
//M=2 sample size
//M=3 number density
//M=4 density
//M=5 u velocity component
//M=6 v velocity component
//M=7 w velocity component
//M=8 translational temperature
//M=9 rotational temperature
//M=10 vibrational temperature
//M=11 temperature
//M=12 Mach number
//M=13 molecules per cell
//M=14 mean collision time / rate
//M=15 mean free path
//M=16 ratio (mean collisional separation) / (mean free path)
//M=17 flow speed
//M=18 scalar pressure nkT
//M=19 x component of translational temperature TTX
//M=20 y component of translational temperature TTY
//M=21 z component of translational temperature TTZ
//M=22 electronic temperature
//
//VARSP(M,N,L) the flowfield properties for species L in cell N
//M=0 the sample size
//M=1 the fraction
//M=2 the temperature component in the x direction
//M=3 the temperature component in the y direction
//M=4 the temperature component in the z direction
//M=5 the translational temperature
//M=6 the rotational temperature
//M=7 the vibrational temperature
//M=8 the temperature
//M=9 the x component of the diffusion velocity
//M=10 the y component of the diffusion velocity
//M=11 the z component of the diffusion velocity
//M=12 the electronic temperature
//
//VARS(N,M) surface property N on interval L of surface M
//
//N=0 the unweighted sample (remainder of variables are weighted for cyl. and sph. flows)
//N=1 the incident sample
//N=2 the reflected sample
//N=3 the incident number flux
//N=4 the reflected number flux
//N=5 the incident pressure
//N=6 the reflected pressure
//N=7 the incident parallel shear stress
//N=8 the reflected parallel shear stress
//N=9 the incident normal-to-plane shear stress
//N=10 the reflected normal shear stress
//N=11 the incident translational heat flux
//N=12 the reflected translational heat flux
//N=13 the incident rotational heat flux
//N=14 the reflected rotational heat flux
//N=15 the incident vibrational heat flux
//N=16 the reflected vibrational heat flux
//N=17 the incident heat flux from surface reactions
//N=18 the reflected heat flux from surface reactions
//N=19 slip velocity
//N=20 temperature slip
//N=21 rotational temperature slip
//N=22 the net pressure
//N=23 the net parallel in-plane shear
//N=24 the net parallel normal-to-plane shear
//N=25 the net translational energy flux
//N=26 the net rotational heat flux
//N=27 the net vibrational heat flux
//N=28 the heat flux from reactions
//N=29 total incident heat transfer
//N=30 total reflected heat transfer
//N=31 net heat transfer
//N=32 surface temperature --not implemented
//N=33 incident electronic energy
//N=34 reflected electronic energy
//N=35 net electronic energy
//N=35+K the percentage of species K
//
//COLLS(N) the number of collisions in sampling cell N
//WCOLLS(N) weighted number
//CLSEP(N) the total collision partner separation distance in sampling cell N
//
//VIBFRAC(L,K,M) the sum of species L mode K in level M
//SUMVIB(L,K) the total sample in VIBFRAC
//
//THE following variables apply in the sampling of distribution functions
//(some are especially for the dissociation of oxygen
//
//NDISSOC the number of dissociations
//NRECOMB the number of recombinations
//NDISSL(L) the number of dissociations from level L
//NTSAMP the number of temperature samples
//STEMP(L) the temperature of species L
//TRANSTEMP(L) the translational temperature of species L
//ROTTEMP(L) rotational temperature of species L
//VIBTEMP(L) vibrational temperature of species L
//ELTEMP(L) electronic temperature of species L
//
};
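//
//--A minimal sketch (illustration only) of how a translational temperature component follows from
//--the sampled sums documented above, e.g. TTX=(mass/BOLTZ)*(<u*u>-<u>*<u>) with the averages formed
//--from CS(5,N,L), CS(2,N,L) and CS(1,N,L); the name SKETCH_TRANS_TEMP is hypothetical.
static double SKETCH_TRANS_TEMP(double SUMN, double SUMU, double SUMUU, double MASS)
{
    //SUMN weighted sample size, SUMU sum of a velocity component, SUMUU sum of its square
    double UMEAN = SUMU/SUMN;
    return (MASS/BOLTZ)*(SUMUU/SUMN - UMEAN*UMEAN);
}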
class GEOM_1D : public Managed
{
public:
//declares the variables associated with the flowfield geometry and cell structure
//for homogeneous gas and one-dimensional flow studies
int NCELLS,NCCELLS,NCIS,NDIV,MDIV,ILEVEL,IFX,JFX,IVB,IWF;
//int ITYPE[3];
int *ITYPE;
int *ICELL;
int ** ICCELL,**JDIV;
void i_allocate(int x, int *&arr){
cudaMallocManaged(&arr, x*sizeof(int));
}
void i_allocate(int x, int y, int **&arr){
    cudaMallocManaged(&arr, x*sizeof(int*)); //--the row array holds x pointers, not x ints
    for(int i =0; i< x; ++i)
        cudaMallocManaged(&arr[i], y*sizeof(int));
}
double DDIV,XS,VELOB,WFM,AWF,FREM,XREM;
//double XB[3];
double *XB;
double **CELL,**CCELL;
void d_allocate(int x, int y, double**&arr){
cudaMallocManaged(&arr, x*sizeof(double));
for(int i =0; i< x; ++i)
cudaMallocManaged(&arr[i], y*sizeof(double));
}
GEOM_1D(){
cudaMallocManaged(&ITYPE, 3*sizeof(int));
cudaMallocManaged(&XB, 3*sizeof(double));
}
//
//XB(1), XB(2) the minimum, maximum x coordinate
//DDIV the width of a division
//ITYPE(K) the type of boundary at the minimum x (K=1) and maximum x (K=2) boundaries
// 0 for a stream boundary
// 1 for a plane of symmetry
// 2 for a solid surface
// 3 for a vacuum
//NCELLS the number of sampling cells
//NCCELLS the number of collision cells
//NCIS the number of collision cells in a sampling cell
// MDIV the maximum number of sampling cell divisions at any level of subdivision
//IVB 0,1 for stationary, moving outer boundary
//IWF 0 for no radial weighting factors, 1 for radial weighting factors
//WFM, set in data as the maximum weighting factor, then divided by the maximum radius
//AWF overall ratio of real to weighted molecules
//VELOB the speed of the outer boundary
//ILEVEL level of subdivision in adaption (0 before adaption)
//JDIV(N,M) (-cell number) or (start address -1 in JDIV(N+1,M), where M is MDIV
//IFX 0 for plane flow, 1 for cylindrical flow, 3 for spherical flow
//JFX IFX+1
//CELL(M,N) information on sampling cell N
// M=1 x coordinate
// M=2 minimum x coordinate
// M=3 maximum x coordinate
// M=4 volume
//ICELL(N) number of collision cells preceding those in sampling cell N
//CCELL(M,N) information on collision cell N
// M=1 volume
// M=2 remainder in collision counting
// M=3 half the allowed time step
// M=4 maximum value of product of cross-section and relative velocity
// M=5 collision cell time
//ICCELL(M,N) integer information on collision cell N
// M=1 the (start address -1) in ICREF of molecules in collision cell N
// M=2 the number of molecules in collision cell N
// M=3 the sampling cell in which the collision cell lies
//FREM fraction of molecule removal
//XREM the coordinate at which removal commences
//
};
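//
//--A minimal sketch (illustration only) of locating the division that contains a coordinate X on
//--the uniform level-0 grid described above (width DDIV starting at XB(1)); cell adaption through
//--JDIV is not handled here and the name SKETCH_FIND_DIV is hypothetical.
static int SKETCH_FIND_DIV(double X, double XB1, double DDIV, int NDIV)
{
    int N = (int)((X - XB1)/DDIV) + 1; //divisions are numbered from 1
    if (N < 1) N = 1;
    if (N > NDIV) N = NDIV;
    return N;
}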
clock_t start;
fstream file_9;
fstream file_18;
CALC *calc = new CALC;
GAS *gas = new GAS;
MOLECS *molecs = new MOLECS;
GEOM_1D *geom = new GEOM_1D;
OUTPUT *output =new OUTPUT;
template <typename T>
string to_string(T value)
{
std::ostringstream os ;
os << value ;
return os.str() ;
}
int main()
{
// //CALC calc;
// //MOLECS molecs;
// //GAS gas;
// //OUTPUT output;
// //GEOM_1D geom;
//
// IMPLICIT NONE
//
int IRUN,ICONF,N,M,IADAPT,IRETREM,ISET;
double A;
//
fstream file_7;
calc->NVER=1; //for major changes, e.g. to basic architecture
calc->MVER=1 ; //significant changes, but must change whenever the data in a DSnD.DAT file changes
calc->NREL=1 ; //the release number
//
//***********************
//set constants
// PI=3.1415926535897932D00
// DPI=6.283185307179586D00
// SPI=1.772453850905516D00
// BOLTZ=1.380658D-23
// AVOG=6.022169D26
//***********************
//
//*************************************************
//**** ADJUSTABLE COMPUTATIONAL PARAMETERS ****
//*************************************************
//
calc->NMCC=15; //DEFAULT=15--desired number of simulated molecules in a collision cell
//
calc->CPDTM=0.2; //DEFAULT=0.2--fraction of the local mean collision time that is the desired maximum time step
//
calc->TPDTM=0.5 ; //DEFAULT=0.5--the fraction or multiple of a sampling cell transit time that is the desired maximum time step
//
calc->NNC=1; //DEFAULT=0--0 to select collision partner randomly from collision cell, 1 for nearest-neighbor collisions
//
calc->SAMPRAT=5; //DEFAULT=5--the number of time steps in a sampling interval
//
calc->OUTRAT=10; //50 //DEFAULT=50--the number of flow samples in a file output interval
//
calc->FRACSAM=0.5; //0.5 //DEFAULT=0.5--fraction of the output interval over which a time-averaged sample is taken in an unsteady flow
//
calc->ISAD=0; //DEFAULT=0--0,1 to not adapt, to adapt cells automatically at start of output interval in an unsteady flow (not yet implemented)
//
calc->IMTS=2; //DEFAULT=0--0 to set the move time step to the instantaneous overall time step that changes with time
// 1 to use a cell dependent collision time
// 2 to keep the time step fixed at the initial value
//
calc->FNUMF=1; //DEFAULT=1--adjustment factor to the automatically generated value for the number of real molecules
// that are represented by each simulated molecule.
// (The adjustment may be large because the automatic setting assumes that the whole flowfield is at the stream conditions.)
//
//automatic adjustments may be applied for some application classes (e.g homogeneous gas studies)
//
calc->TLIM=1.e-5; //DEFAULT=1.D20 sets an indefinite run - set if a definite STOP time is required
//
//************************************************
//
//open a diagnostic file and check whether an instance of the program is already running
//
// fstream file_9;
cout<<"DSMC PROGRAM"<<endl;
file_9.open("DIAG.TXT", ios::trunc | ios::out);
if(file_9.is_open()){
file_9<<"File DIAG.TXT has been opened"<<endl;
cout<<"File DIAG.TXT has been opened"<<endl;
}
else{
cout<<"Stop the DS1.EXE that is already running and try again"<<endl;
//return 0;
}
// OPEN (9,FILE='DIAG.TXT',FORM='FORMATTED',STATUS='REPLACE')
// WRITE (9,*,IOSTAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'Stop the DS1.EXE that is already running and try again'
// STOP
// ELSE
// WRITE (9,*) 'File DIAG.TXT has been opened'
// END IF
//
//open a molecule number history file
//OPEN (13,FILE='MolNum.DAT',FORM='FORMATTED',STATUS='REPLACE')
//
//initialise run variables
IRUN=0;
geom->IVB=0; //will be reset to 1 by restart program if there is a moving wall
//
while((IRUN < 1) || (IRUN > 2)){
cout<< "DSMC Version" <<calc->NVER<<'.'<<calc->MVER<<'.'<<calc->NREL<<endl;
cout<< "enter 1 to continue a current run"<<endl;
cout<< "enter 2 to start a new run :-"<<endl;
//
cin>> IRUN;
}
if(IRUN == 1) file_9<< "Continuing an existing run"<<endl;//WRITE (9,*) 'Continuing an existing run'
if(IRUN == 2) {
cout<< "Enter 1 to confirm, 0 to continue current run :-"<<endl;
cin>> ICONF;
if(ICONF == 1)
file_9<<"Starting a new run"<<endl;//WRITE (9,*) 'Starting a new run'
else{
IRUN=1;
file_9<<"Continuing an existing run"<<endl;
//WRITE (9,*) 'Continuing an existing run'
}
}
//
if(IRUN == 2){ //new run
cout<< "Enter 0 for a homogeneous gas, or"<<endl;
cout<< "Enter 1 for a one-dimensional flow, or"<<endl;
cout<< "Enter 2 for a two-dimensional plane flow, or"<<endl;
cout<< "Enter 3 for a three dimensional flow, or"<<endl;
cout<< "enter 4 for an axially-symmetric flow :-"<<endl;
cin>> calc->ICLASS;
calc->NCLASS=2; //default 2D
if(calc->ICLASS < 2) calc->NCLASS=1; //0D or 1D
if(calc->ICLASS == 3) calc->NCLASS=3; //3D
cout<<"Enter 0 for an eventually steady flow, or"<<endl;
cout<<"enter 1 for a continuing unsteady flow :-"<<endl;
cin>> calc->ISF;
file_7.open("RUN_CLASS.TXT", ios::trunc |ios::out);
if(file_7.is_open()){
cout<<"RUN_CLASS.TXT is opened"<<endl;
}
else{
cout<<"RUN_CLASS.TXT not opened"<<endl;
cin.get();
}
        file_7<<calc->ICLASS<<" "<<calc->ISF<<endl; //separate the values so they can be read back individually
file_7.close();
// OPEN (7,FILE='RUN_CLASS.TXT',FORM='FORMATTED',STATUS='REPLACE')
// WRITE (7,*) ICLASS,ISF
// CLOSE (7)
file_9<<"Starting a new run with ICLASS, ISF "<<calc->ICLASS<<" "<<calc->ISF<<endl;
// WRITE (9,*) 'Starting a new run with ICLASS, ISF',ICLASS,ISF
cout<<"Starting a new run with ICLASS, ISF "<<calc->ICLASS<<" "<<calc->ISF<<endl;
}
//
if(IRUN == 1){ //continued run
file_7.open("RUN_CLASS.TXT" , ios::in );
if(file_7.is_open()){
cout<<"RUN_CLASS.TXT is opened"<<endl;
}
else{
cout<<"RUN_CLASS.TXT not opened"<<endl;
cin.get();
}
file_7 >>calc->ICLASS>>calc->ISF;
file_7.close();
// OPEN (7,FILE='RUN_CLASS.TXT',FORM='FORMATTED',STATUS='OLD')
// READ (7,*) ICLASS,ISF
// CLOSE(7)
READ_RESTART();
//
calc->TSAMP=calc->FTIME+calc->DTSAMP;
calc->TOUT=calc->FTIME+calc->DTOUT;
if((gas->MEX > 0) && (calc->ISF == 1)){
cout<<"Enter 0 to continue the reaction sample or"<<endl;
cout<<"enter 1 to continue with a new reaction sample :-"<<endl;
cin>> N;
if(N == 1){
//memset(gas->TNEX,0.e00,sizeof(*gas->TNEX));
//memset(calc->TDISS,0.e00,sizeof(*calc->TDISS));
//memset(calc->TRECOMB,0.e00,sizeof(*calc->TRECOMB));
for(int i=0;i<gas->MEX+1;i++)
gas->TNEX[i]= 0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->TDISS[i]=0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->TRECOMB[i]=0.e00;
}
}
//
if((calc->ISAD == 0) && (calc->ISF == 0)){
cout<<"Enter 0 to continue the current sample or"<<endl;
cout<<"enter 1 to continue with a new sample :-"<<endl;
cin>> N;
if(N == 1){
if((geom->ITYPE[2] == 4) && (calc->ICN == 0)){
cout<<"Enter 0 to continue to not enforce constant molecule number"<<endl;
cout<<"enter 1 to start to enforce constant molecule number :-"<<endl;
cin>> M;
if(M == 1) calc->ICN=1;
}
cout<<"Enter 1 to adapt the cells, or 0 to continue with current cells:-"<<endl;
cin>>IADAPT;
if(IADAPT == 1){
cout<<"Adapting cells"<<endl;
ADAPT_CELLS_1D() ;
INDEX_MOLS();
WRITE_RESTART();
}
else
cout<<"Continuing with existing cells"<<endl;
//
if(calc->IREM == 2){
cout<<"Enter 1 to reset the removal details, or 0 to continue with current details:-"<<endl;
cin>>IRETREM;
if(IRETREM == 1){
geom->FREM=-1.e00;
while((geom->FREM < -0.0001) || (geom->FREM > 5.0)){
cout<<"Enter the fraction of entering molecules that are removed:-"<<endl;
cin>>geom->FREM;
cout<<"The ratio of removed to entering mlecules is \t"<<geom->FREM<<endl;
// WRITE (*,999) FREM
}
file_9<<"The ratio of removed to entering mlecules is \t"<<geom->FREM<<endl;
// WRITE (9,999) FREM
// 999 FORMAT (' The ratio of removed to entering molecules is ',G15.5)
if(geom->FREM > 1.e-10){
geom->XREM=geom->XB[1]-1.0;
while((geom->XREM < geom->XB[1]-0.0001) || (geom->XREM > geom->XB[2]+0.0001)){
cout<<"Enter x coordinate of the upstream removal limit:-"<<endl;
cin>>geom->XREM;
cout<<"The molecules are removed from \t"<<geom->XREM<<" to "<<geom->XB[2]<<endl; //988
// WRITE (*,998) XREM,XB(2)
}
file_9<<"The molecules are removed from \t"<<geom->XREM<<" to "<<geom->XB[2]<<endl;
// WRITE (9,998) XREM,XB(2)
// 998 FORMAT (' The molecules are removed from ',G15.5,' to',G15.5)
}
}
}
//
INITIALISE_SAMPLES();
}
}
}
//
if(IRUN == 2){
//
READ_DATA();
//
if(calc->ICLASS < 2) SET_INITIAL_STATE_1D();
//
if(calc->ICLASS == 0) ENERGY(0,A);
//
WRITE_RESTART();
//
}
//
while(calc->FTIME < calc->TLIM){
//
//
calc->FTIME=calc->FTIME+calc->DTM;
//
file_9<<" TIME "<<setw(20)<<setprecision(10)<<calc->FTIME<<" NM "<<molecs->NM<<" COLLS "<<std::left<<setw(20)<<setprecision(10)<<calc->TOTCOL<<endl;
// WRITE (9,*) 'TIME',FTIME,' NM',NM,' COLLS',TOTCOL
cout<< " TIME "<<setw(20)<<setprecision(10)<<calc->FTIME<<" NM "<<molecs->NM<<" COLLS "<<std::left<<setw(20)<<setprecision(10)<<calc->TOTCOL<<endl;
//
// WRITE (13,*) FTIME/TNORM,FLOAT(NM)/FLOAT(NMI) //uncomment if a MOLFILE.DAT is to be generated
//
// WRITE (*,*) 'MOVE'
//cout<<"MOVE"<<endl;
MOLECULES_MOVE_1D();
//
if((geom->ITYPE[1] == 0) || (geom->ITYPE[2] == 0) || (geom->ITYPE[2] == 4)) MOLECULES_ENTER_1D();
//
// WRITE (*,*) 'INDEX'
//ut<<"INDEX"<<endl;
// cout<<calc->TOUT<<endl;
// cin.get();
INDEX_MOLS();
//
// WRITE (*,*) 'COLLISIONS'
COLLISIONS();
//
// if(gas->MMVM > 0) {
// cout<<"DISSOCIATION"<<endl;
// DISSOCIATION();
// }
//
if(calc->FTIME > calc->TSAMP){
// WRITE (*,*) 'SAMPLE'
if(calc->ISF == 0) SAMPLE_FLOW();
if((calc->ISF == 1) && (calc->FTIME < calc->TPOUT+(1.e00-calc->FRACSAM)*calc->DTOUT)){
calc->TSAMP=calc->TSAMP+calc->DTSAMP;
INITIALISE_SAMPLES();
}
if((calc->ISF == 1) && (calc->FTIME >= calc->TPOUT+(1.e00-calc->FRACSAM)*calc->DTOUT)) SAMPLE_FLOW();
}
//
if(calc->FTIME > calc->TOUT){
cout<<"writing OUTPUT"<<endl;
// WRITE (*,*) 'OUTPUT'
WRITE_RESTART();
//
OUTPUT_RESULTS();
calc->TPOUT=calc->FTIME;
}
//
}
return 0;
//
}
void ALLOCATE_GAS()
{
// //GAS gas;
// //CALC calc;
gas->d_allocate(gas->MSP+1,3,gas->FSP);
gas->d_allocate(7,gas->MSP+1,gas->SP);
gas->d_allocate(4,gas->MSP+1,gas->SPR);
    gas->d_allocate(9,gas->MSP+1,gas->MSP+1,gas->SPM); //SPM(8,MSP,MSP) in the Fortran, so both species dimensions need MSP+1
gas->i_allocate(3,gas->MSP+1,gas->ISPR);
gas->i_allocate(gas->MSP+1,gas->ISPV);
gas->d_allocate(7,gas->MSP+1,3,gas->ENTR);
gas->d_allocate(gas->MSP+1,3,gas->VMP);
calc->d_allocate(gas->MSP+1,calc->VNMAX);
gas->d_allocate(gas->MSP+1,gas->CR);
calc->d_allocate(gas->MSP+1,gas->MSP+1,calc->TCOL);
gas->i_allocate(gas->MSP+1,gas->MSP+1,gas->ISPRC);
gas->i_allocate(gas->MSP+1,gas->MSP+1,gas->ISPRK);
gas->d_allocate(5,gas->MSP+1,gas->MSP+1,gas->MSP+1,gas->SPRC);
gas->i_allocate(gas->MSP+1,gas->NELL);
gas->d_allocate(4,gas->MELE+1,gas->MSP+1,gas->QELC);
gas->d_allocate(3,gas->MSP+1,gas->MSP+1,gas->MVIBL+1,gas->SPRP);
gas->d_allocate(3,gas->MSP+1,gas->MSP+1,gas->SPRT);
calc->d_allocate(gas->MSP+1,calc->AJM);
gas->d_allocate(gas->MSP+1,gas->FP);
calc->d_allocate(gas->MSP+1,calc->ALOSS);
calc->d_allocate(gas->MSP+1,calc->EME);
/*ALLOCATE (FSP(MSP,2),SP(6,MSP),SPR(3,MSP),SPM(8,MSP,MSP),ISPR(2,MSP),ISPV(MSP),ENTR(6,MSP,2), &
VMP(MSP,2),VNMAX(MSP),CR(MSP),TCOL(MSP,MSP),ISPRC(MSP,MSP),ISPRK(MSP,MSP),SPRC(4,MSP,MSP,MSP), &
NELL(MSP),QELC(3,MELE,MSP),SPRP(2,MSP,MSP,0:MVIBL),SPRT(2,MSP,MSP),AJM(MSP),FP(MSP), &
ALOSS(MSP),EME(MSP),STAT=ERROR)
//
IF (ERROR /= 0) THEN
WRITE (*,*)'PROGRAM COULD NOT ALLOCATE SPECIES VARIABLES',ERROR
END IF
//*/
gas->i_allocate(gas->MMEX+1,gas->MSP+1,gas->MSP+1,gas->NEX);
gas->i_allocate(gas->MSP+1,gas->MSP+1,gas->NSPEX);
gas->d_allocate(7,gas->MMEX+1,gas->MSP+1,gas->MSP+1,gas->SPEX);
gas->i_allocate(gas->MMEX+1,8,gas->MSP+1,gas->MSP+1,gas->ISPEX);
gas->i_allocate(5,gas->MSP+1,gas->TREACG);
gas->d_allocate(gas->MMEX+1,gas->PSF);
gas->i_allocate(5,gas->MSP+1,gas->TREACL);
gas->d_allocate(gas->MEX+1,gas->TNEX);
gas->d_allocate(3,gas->MMEX+1,gas->MSP+1,gas->MSP+1,gas->MVIBL+1,gas->SPREX);
gas->i_allocate(3,gas->MSP+1,gas->NSLEV);
gas->d_allocate(gas->MSP+1,gas->SLER);
// ALLOCATE (NEX(MMEX,MSP,MSP),NSPEX(MSP,MSP),SPEX(6,MMEX,MSP,MSP),ISPEX(MMEX,7,MSP,MSP),TREACG(4,MSP), &
// PSF(MMEX),TREACL(4,MSP),TNEX(MEX),SPREX(2,MMEX,MSP,MSP,0:MVIBL),NSLEV(2,MSP),SLER(MSP),STAT=ERROR)
// //
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT ALLOCATE Q-K REACTION VARIABLES',ERROR
// END IF
// //
if(gas->MMVM >= 0){
gas->d_allocate(6,gas->MMVM+1,gas->MSP+1,gas->SPVM);
gas->i_allocate(3,gas->MMVM+1,gas->MSP+1,gas->ISPVM);
calc->d_allocate(gas->MSP+1,calc->TDISS);
calc->d_allocate(gas->MSP+1,calc->TRECOMB);
//ALLOCATE (SPVM(5,MMVM,MSP),ISPVM(2,MMVM,MSP),TDISS(MSP),TRECOMB(MSP),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT ALLOCATE VIBRATION VARIABLES',ERROR
}
//N.B. surface reactions are not yet implemented
if(gas->MNSR > 0){
gas->d_allocate(gas->MNSR+1,gas->ERS);
gas->i_allocate(3,gas->MNSR+1,gas->LIS);
gas->i_allocate(7,gas->MNSR+1,gas->LRS);
gas->i_allocate(gas->MNSR+1,gas->MSP+1,gas->ISRCD);
//ALLOCATE (ERS(MNSR),LIS(2,MNSR),LRS(6,MNSR),ISRCD(MNSR,MSP),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT ALLOCATE SURFACE REACTION VARIABLES',ERROR
}
//calc->AJM=0.e00;
//memset(calc->AJM,0.e00,sizeof(*calc->AJM));
for(int i=0;i<gas->MSP+1;i++){
calc->AJM[i]=0.e00;
}
return;
}
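//
//The commented-out Fortran above reports allocation failures through an ERROR flag, while the
//cudaMallocManaged calls in the allocators ignore their return codes. A minimal sketch of an
//equivalent check; the helper name CHECK_MANAGED is an illustrative assumption and is not part
//of the original program.
void CHECK_MANAGED(cudaError_t err, const char *what)
{
    if(err != cudaSuccess)
        cout<<"PROGRAM COULD NOT ALLOCATE "<<what<<" : "<<cudaGetErrorString(err)<<endl;
}
//e.g. CHECK_MANAGED(cudaMallocManaged(&gas->TNEX,(gas->MEX+1)*sizeof(double)),"Q-K REACTION VARIABLES");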
void HARD_SPHERE()
{
////GAS gas;
////CALC calc;
cout<<"Reading HARD_SPHERE Data"<<endl;
gas->MSP=1;
gas->MMRM=0;
gas->MMVM=0;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=1;
gas->MVIBL=0;
ALLOCATE_GAS();
gas->SP[1][1]=4.0e-10; //reference diameter
gas->SP[2][1]=273.0; //reference temperature
gas->SP[3][1]=0.5; //viscosity-temperature index
gas->SP[4][1]=1.0; //reciprocal of VSS scattering parameter (1 for VHS)
gas->SP[5][1]=5.e-26; //mass
gas->ISPR[1][1]=0; //number of rotational degrees of freedom
cout<<"Hard Sphere data done"<<endl;
return;
}
void ARGON()
{
// //GAS gas;
// //CALC calc;
cout<<"Reading Argon Data"<<endl;
gas->MSP=1;
gas->MMRM=0;
gas->MMVM=0;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=1;
gas->MVIBL=0;
ALLOCATE_GAS();
gas->SP[1][1]=4.17e-10;
gas->SP[2][1]=273.15;
gas->SP[3][1]=0.81;
gas->SP[4][1]=1.0;
gas->SP[5][1]=6.63e-26;
gas->ISPR[1][1]=0;
gas->ISPR[2][1]=0;
cout<<"Argon Data done"<<endl;
return;
}
//
void IDEAL_NITROGEN()
{
// //GAS gas;
// //CALC calc;
cout<<"Reading IDEAL_NITROGEN data"<<endl;
gas->MSP=1;
gas->MMRM=1;
gas->MMVM=0;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=0;
gas->MVIBL=0;
ALLOCATE_GAS();
gas->SP[1][1]=4.17e-10;
gas->SP[2][1]=273.0;
gas->SP[3][1]=0.74;
gas->SP[4][1]=1.0;
gas->SP[5][1]=4.65e-26;
gas->ISPR[1][1]=2;
gas->ISPR[2][1]=0;
gas->SPR[1][1]=5.0;
return;
}
//
void REAL_OXYGEN()
{
//
//GAS gas;
//CALC calc;
cout<<"Reading Real_Oxygen data"<<endl;
gas->MSP=2;
gas->MMRM=1;
gas->MMVM=1;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=5;
gas->MVIBL=26;
ALLOCATE_GAS();
gas->SP[1][1]=4.07e-10;
gas->SP[2][1]=273.00;
gas->SP[3][1]=0.77e00;
gas->SP[4][1]=1.e00;
gas->SP[5][1]=5.312e-26;
gas->SP[6][1]=0.e00;
gas->ISPR[1][1]=2;
gas->ISPR[2][1]=0 ; //0,1 for constant,polynomial rotational relaxation collision number
gas->SPR[1][1]=5.0; // the collision number or the coefficient of temperature in the polynomial (if a polynomial, the coeff. of T^2 is in spr_db(3 )
gas->ISPV[1]=1 ; // the number of vibrational modes
gas->SPVM[1][1][1]=2256.e00 ; // the characteristic vibrational temperature
gas->SPVM[2][1][1]=90000.e00; // a constant Zv, or the reference Zv
gas->SPVM[3][1][1]=2256.e00; // -1 for a constant Zv, or the reference temperature
gas->SPVM[5][1][1]=1.0; //arbitrary reduction factor
gas->ISPVM[1][1][1]=2;
gas->ISPVM[2][1][1]=2;
gas->NELL[1]=3;
if(gas->MELE > 1){
//******
gas->QELC[1][1][1]=3.0;
gas->QELC[2][1][1]=0.0;
gas->QELC[3][1][1]=50.0; //500.
gas->QELC[1][2][1]=2.0;
gas->QELC[2][2][1]=11393.0;
gas->QELC[3][2][1]=50.0; //500 //for equipartition, the cross-section ratios must be the same for all levels
gas->QELC[1][3][1]=1.0;
gas->QELC[2][3][1]=18985.0;
gas->QELC[3][3][1]=50.0; //500.
}
//
//species 2 is atomic oxygen
gas->SP[1][2]=3.e-10;
gas->SP[2][2]=273.e00;
gas->SP[3][2]=0.8e00;
gas->SP[4][2]=1.e00;
gas->SP[5][2]=2.656e-26;
gas->SP[6][2]=4.099e-19;
gas->ISPR[1][2]=0;
gas->ISPV[2]=0; //must be set//
//set electronic information
if(gas->MELE > 1){
gas->NELL[2]=5;
gas->QELC[1][1][2]=5.0;
gas->QELC[2][1][2]=0.0;
gas->QELC[3][1][2]=50.0;
gas->QELC[1][2][2]=3.0;
gas->QELC[2][2][2]=228.9;
gas->QELC[3][2][2]=50.0;
gas->QELC[1][3][2]=1.0;
gas->QELC[2][3][2]=325.9;
gas->QELC[3][3][2]=50.0;
gas->QELC[1][4][2]=5.0;
gas->QELC[2][4][2]=22830.0;
gas->QELC[3][4][2]=50.0;
gas->QELC[1][5][2]=1.0;
gas->QELC[2][5][2]=48621.0;
gas->QELC[3][5][2]=50.0;
}
//set data needed for recombination
//
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->ISPRC[i][j]=0;
gas->ISPRK[i][j]=0;
}
}
// gas->ISPRC=0;
// gas->ISPRK=0;
gas->ISPRC[2][2]=1; //O+O -> O2 recombined species code for an O+O recombination
gas->ISPRK[2][2]=1 ; //the relevant vibrational mode of this species
gas->SPRC[1][2][2][1]=0.04;
gas->SPRC[2][2][2][1]=-1.3;
gas->SPRC[1][2][2][2]=0.05;
gas->SPRC[2][2][2][2]=-1.1;
gas->SPRT[1][2][2]=5000.e00;
gas->SPRT[2][2][2]=15000.e00;
//
//memset(gas->NSPEX,0,sizeof(**gas->NSPEX));
//memset(gas->SPEX,0.e00,sizeof(****gas->SPEX));
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->NSPEX[i][j]=0;
}
}
for(int i=0;i<7;i++){
for(int j=0;j<gas->MMEX+1;j++){
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<gas->MSP+1;l++)
gas->SPEX[i][j][k][l]=0.e00;
}
}
}
//gas->SPEX=0.e00;
    //gas->ISPEX=0; would null the allocated pointer; zero the entries instead
    for(int i=0;i<gas->MMEX+1;i++){
        for(int j=0;j<8;j++){
            for(int k=0;k<gas->MSP+1;k++){
                for(int l=0;l<gas->MSP+1;l++)
                    gas->ISPEX[i][j][k][l]=0;
            }
        }
    }
//
DERIVED_GAS_DATA();
//
cout<<"Real_Oxygen data done"<<endl;
return;
}
//
void IDEAL_AIR()
{
//GAS gas;
//CALC calc;
cout<<"Reading IDEAL_AIR data"<<endl;
gas->MSP=2;
gas->MMRM=1;
gas->MMVM=0;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=1;
gas->MVIBL=0;
//
ALLOCATE_GAS();
//
gas->SP[1][1]=4.07e-10;
gas->SP[2][1]=273.0;
gas->SP[3][1]=0.77;
gas->SP[4][1]=1.0;
gas->SP[5][1]=5.312e-26;
gas->ISPR[1][1]=2;
gas->ISPR[2][1]=0;
gas->SPR[1][1]=5.0;
gas->SP[1][2]=4.17e-10;
gas->SP[2][2]=273.0;
gas->SP[3][2]=0.74;
gas->SP[4][2]=1.0;
gas->SP[5][2]=4.65e-26;
gas->ISPR[1][2]=2;
gas->ISPR[2][2]=0;
gas->SPR[1][2]=5.0;
cout<<"IDEAL_AIR data done"<<endl;
return;
}
//
void REAL_AIR()
{
//GAS gas;
//CALC calc;
cout<<"REAL_AIR data done"<<endl;
gas->MSP=5;
gas->MMRM=1;
gas->MMVM=1;
gas->MELE=5;
gas->MVIBL=40; //?
//
gas->MEX=4;
gas->MMEX=1;
//
gas->MNSR=0;
ALLOCATE_GAS();
//species 1 is oxygen
gas->SP[1][1]=4.07e-10;
gas->SP[2][1]=273.e00;
gas->SP[3][1]=0.77e00;
gas->SP[4][1]=1.e00;
gas->SP[5][1]=5.312e-26;
gas->SP[6][1]=0.e00;
gas->ISPR[1][1]=2;
gas->ISPR[2][1]=0;
gas->SPR[1][1]=5.e00;
gas->ISPV[1]=1; // the number of vibrational modes
gas->SPVM[1][1][1]=2256.e00; // the characteristic vibrational temperature
gas->SPVM[2][1][1]=18000.e00; //90000.D00 // a constant Zv, or the reference Zv
gas->SPVM[3][1][1]=2256.e00; // -1 for a constant Zv, or the reference temperature
gas->SPVM[5][1][1]=1.0;
gas->ISPVM[1][1][1]=3;
gas->ISPVM[2][1][1]=3;
gas->NELL[1]=3;
gas->QELC[1][1][1]=3.0;
gas->QELC[2][1][1]=0.0;
gas->QELC[3][1][1]=50.0;
gas->QELC[1][2][1]=2.0;
gas->QELC[2][2][1]=11393.0;
gas->QELC[3][2][1]=50.0;
gas->QELC[1][3][1]=1.0;
gas->QELC[2][3][1]=18985.0;
gas->QELC[3][3][1]=50.0;
//species 2 is nitrogen
gas->SP[1][2]=4.17e-10;
gas->SP[2][2]=273.e00;
gas->SP[3][2]=0.74e00;
gas->SP[4][2]=1.e00;
gas->SP[5][2]=4.65e-26;
gas->SP[6][2]=0.e00;
gas->ISPR[1][2]=2;
gas->ISPR[2][2]=0;
gas->SPR[1][2]=5.e00;
gas->ISPV[2]=1;
gas->SPVM[1][1][2]=3371.e00;
gas->SPVM[2][1][2]=52000.e00; //260000.D00
gas->SPVM[3][1][2]=3371.e00;
gas->SPVM[5][1][2]=0.3;
gas->ISPVM[1][1][2]=4;
gas->ISPVM[2][1][2]=4;
gas->NELL[2]=1;
gas->QELC[1][1][2]=1.0;
gas->QELC[2][1][2]=0.0;
gas->QELC[3][1][2]=100.0;
//species 3 is atomic oxygen
gas->SP[1][3]=3.e-10;
gas->SP[2][3]=273.e00;
gas->SP[3][3]=0.8e00;
gas->SP[4][3]=1.e00;
gas->SP[5][3]=2.656e-26;
gas->SP[6][3]=4.099e-19;
gas->ISPR[1][3]=0;
gas->ISPV[3]=0;
gas->NELL[3]=5;
gas->QELC[1][1][3]=5.0;
gas->QELC[2][1][3]=0.0;
gas->QELC[3][1][3]=50.0;
gas->QELC[1][2][3]=3.0;
gas->QELC[2][2][3]=228.9;
gas->QELC[3][2][3]=50.0;
gas->QELC[1][3][3]=1.0;
gas->QELC[2][3][3]=325.9;
gas->QELC[3][3][3]=50.0;
gas->QELC[1][4][3]=5.0;
gas->QELC[2][4][3]=22830.0;
gas->QELC[3][4][3]=50.0;
gas->QELC[1][5][3]=1.0;
gas->QELC[2][5][3]=48621.0;
gas->QELC[3][5][3]=50.0;
//species 4 is atomic nitrogen
gas->SP[1][4]=3.e-10;
gas->SP[2][4]=273.e00;
gas->SP[3][4]=0.8e00;
gas->SP[4][4]=1.0e00;
gas->SP[5][4]=2.325e-26;
gas->SP[6][4]=7.849e-19;
gas->ISPR[1][4]=0;
gas->ISPV[4]=0;
gas->NELL[4]=3;
gas->QELC[1][1][4]=4.0;
gas->QELC[2][1][4]=0.0;
gas->QELC[3][1][4]=50.0;
gas->QELC[1][2][4]=10.0;
gas->QELC[2][2][4]=27658.0;
gas->QELC[3][2][4]=50.0;
gas->QELC[1][3][4]=6.0;
gas->QELC[2][3][4]=41495.0;
gas->QELC[3][3][4]=50.0;
//species 5 is NO
gas->SP[1][5]=4.2e-10;
gas->SP[2][5]=273.e00;
gas->SP[3][5]=0.79e00;
gas->SP[4][5]=1.0e00;
gas->SP[5][5]=4.98e-26;
gas->SP[6][5]=1.512e-19;
gas->ISPR[1][5]=2;
gas->ISPR[2][5]=0;
gas->SPR[1][5]=5.e00;
gas->ISPV[5]=1;
gas->SPVM[1][1][5]=2719.e00;
gas->SPVM[2][1][5]=14000.e00; //70000.D00
gas->SPVM[3][1][5]=2719.e00;
gas->SPVM[5][1][5]=0.2;
gas->ISPVM[1][1][5]=3;
gas->ISPVM[2][1][5]=4;
gas->NELL[5]=2;
gas->QELC[1][1][5]=2.0;
gas->QELC[2][1][5]=0.0;
gas->QELC[3][1][5]=50.0;
gas->QELC[1][2][5]=2.0;
gas->QELC[2][2][5]=174.2;
gas->QELC[3][2][5]=50.0;
//set the recombination data for the molecule pairs
    //memset(gas->ISPRC,0,sizeof(**gas->ISPRC));//gas->ISPRC=0; //data is zero unless explicitly set
//memset(gas->ISPRK,0,sizeof(**gas->ISPRK));//gas->ISPRK=0;
//memset(gas->SPRC,0,sizeof(****gas->SPRC));//gas->SPRC=0.e00;
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->ISPRC[i][j]=0;
}
}
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->ISPRK[i][j]=0;
}
}
    for(int i=0;i<5;i++){
        for(int j=0;j<gas->MSP+1;j++){
            for(int k=0;k<gas->MSP+1;k++){
                for(int l=0;l<gas->MSP+1;l++)
                    gas->SPRC[i][j][k][l]=0.e00; //zero the recombination coefficients SPRC(4,MSP,MSP,MSP), matching the commented memset above
            }
        }
    }
gas->ISPRC[3][3]=1; //O+O -> O2 recombined species code for an O+O recombination
gas->ISPRK[3][3]=1;
gas->SPRC[1][3][3][1]=0.04e00;
gas->SPRC[2][3][3][1]=-1.3e00;
gas->SPRC[1][3][3][2]=0.07e00;
gas->SPRC[2][3][3][2]=-1.2e00;
gas->SPRC[1][3][3][3]=0.08e00;
gas->SPRC[2][3][3][3]=-1.2e00;
gas->SPRC[1][3][3][4]=0.09e00;
gas->SPRC[2][3][3][4]=-1.2e00;
gas->SPRC[1][3][3][5]=0.065e00;
gas->SPRC[2][3][3][5]=-1.2e00;
gas->SPRT[1][3][3]=5000.e00;
gas->SPRT[2][3][3]=15000.e00;
gas->ISPRC[4][4]=2; //N+N -> N2
gas->ISPRK[4][4]=1;
gas->SPRC[1][4][4][1]=0.15e00;
gas->SPRC[2][4][4][1]=-2.05e00;
gas->SPRC[1][4][4][2]=0.09e00;
gas->SPRC[2][4][4][2]=-2.1e00;
gas->SPRC[1][4][4][3]=0.16e00;
gas->SPRC[2][4][4][3]=-2.0e00;
gas->SPRC[1][4][4][4]=0.17e00;
gas->SPRC[2][4][4][4]=-2.0e00;
gas->SPRC[1][4][4][5]=0.17e00;
gas->SPRC[2][4][4][5]=-2.1e00;
gas->SPRT[1][4][4]=5000.e00;
gas->SPRT[2][4][4]=15000.e00;
gas->ISPRC[3][4]=5;
gas->ISPRK[3][4]=1;
gas->SPRC[1][3][4][1]=0.3e00;
gas->SPRC[2][3][4][1]=-1.9e00;
gas->SPRC[1][3][4][2]=0.4e00;
gas->SPRC[2][3][4][2]=-2.0e00;
gas->SPRC[1][3][4][3]=0.3e00;
gas->SPRC[2][3][4][3]=-1.75e00;
gas->SPRC[1][3][4][4]=0.3e00;
gas->SPRC[2][3][4][4]=-1.75e00;
gas->SPRC[1][3][4][5]=0.15e00;
gas->SPRC[2][3][4][5]=-1.9e00;
gas->SPRT[1][3][4]=5000.e00;
gas->SPRT[2][3][4]=15000.e00;
//set the exchange reaction data
//memset(gas->SPEX,0,sizeof(****gas->SPEX));//gas->SPEX=0.e00;
for(int i=0;i<7;i++){
for(int j=0;j<gas->MMEX+1;j++){
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<gas->MSP+1;l++)
gas->SPEX[i][j][k][l]=0.e00;
}
}
}
    //gas->ISPEX=0; and gas->NSPEX=0; would null the allocated pointers; zero the entries instead
    for(int i=0;i<gas->MMEX+1;i++){
        for(int j=0;j<8;j++){
            for(int k=0;k<gas->MSP+1;k++){
                for(int l=0;l<gas->MSP+1;l++)
                    gas->ISPEX[i][j][k][l]=0;
            }
        }
    }
    for(int i=0;i<gas->MSP+1;i++){
        for(int j=0;j<gas->MSP+1;j++)
            gas->NSPEX[i][j]=0;
    }
gas->NSPEX[2][3]=1;
gas->NSPEX[4][5]=1;
gas->NSPEX[3][5]=1;
gas->NSPEX[1][4]=1;
//N2+O->NO+N
gas->ISPEX[1][1][2][3]=2;
gas->ISPEX[1][2][2][3]=3;
gas->ISPEX[1][3][2][3]=5;
gas->ISPEX[1][4][2][3]=4;
gas->ISPEX[1][5][2][3]=1;
gas->ISPEX[1][6][2][3]=1;
gas->SPEX[6][1][2][3]=0.e00;
gas->NEX[1][2][3]=1;
    //NO+N->N2+O
gas->ISPEX[1][1][4][5]=5;
gas->ISPEX[1][2][4][5]=4;
gas->ISPEX[1][3][4][5]=2;
gas->ISPEX[1][4][4][5]=3;
gas->ISPEX[1][5][4][5]=1;
gas->ISPEX[1][6][4][5]=1;
gas->ISPEX[1][7][4][5]=1;
gas->SPEX[1][1][4][5]=0.8e00;
gas->SPEX[2][1][4][5]=-0.75e00;
gas->SPEX[4][1][4][5]=5000.e00;
gas->SPEX[5][1][4][5]=15000.e00;
gas->SPEX[6][1][4][5]=0.e00;
gas->NEX[1][4][5]=2;
//NO+O->O2+N
gas->ISPEX[1][1][3][5]=5;
gas->ISPEX[1][2][3][5]=3;
gas->ISPEX[1][3][3][5]=1;
gas->ISPEX[1][4][3][5]=4;
gas->ISPEX[1][5][3][5]=1;
gas->ISPEX[1][6][3][5]=1;
gas->SPEX[6][1][3][5]=2.e-19;
gas->NEX[1][3][5]=3;
//O2+N->NO+O
gas->ISPEX[1][1][1][4]=1;
gas->ISPEX[1][2][1][4]=4;
gas->ISPEX[1][3][1][4]=5;
gas->ISPEX[1][4][1][4]=3;
gas->ISPEX[1][5][1][4]=1;
gas->ISPEX[1][6][1][4]=1;
gas->ISPEX[1][7][1][4]=1 ;
gas->SPEX[1][1][1][4]=7.e00;
gas->SPEX[2][1][1][4]=-0.85e00;
gas->SPEX[4][1][1][4]=5000.e00;
gas->SPEX[5][1][1][4]=15000.e00;
gas->SPEX[6][1][1][4]=0.e00;
gas->NEX[1][1][4]=4;
DERIVED_GAS_DATA();
cout<<"REAL_AIR data done"<<endl;
return;
}
//
void HELIUM_ARGON_XENON()
{
//GAS gas;
//CALC calc;
cout<<"Reading HELIUM_ARGON_XENON data"<<endl;
gas->MSP=3;
gas->MMRM=0;
gas->MMVM=0;
gas->MNSR=0;
gas->MEX=0;
gas->MMEX=0;
gas->MELE=1;
gas->MVIBL=0;
ALLOCATE_GAS();
gas->SP[1][1]=2.30e-10; //2.33D-10
gas->SP[2][1]=273.0;
gas->SP[3][1]=0.66;
gas->SP[4][1]=0.794; //1.
gas->SP[5][1]=6.65e-27;
gas->ISPR[1][1]=0;
gas->ISPR[2][1]=0;
//
gas->SP[1][2]=4.11e-10; //4.17D-10
gas->SP[2][2]=273.15;
gas->SP[3][2]=0.81;
gas->SP[4][2]=0.714; //1.
gas->SP[5][2]=6.63e-26;
gas->ISPR[1][2]=0;
gas->ISPR[2][2]=0;
//
gas->SP[1][3]=5.65e-10; //5.74D-10
gas->SP[2][3]=273.0;
gas->SP[3][3]=0.85;
gas->SP[4][3]=0.694; //1.
gas->SP[5][3]=21.8e-26;
gas->ISPR[1][3]=0;
gas->ISPR[2][3]=0;
cout<<"HELIUM_ARGON_XENON data done"<<endl;
return;
}
//
void OXYGEN_HYDROGEN()
{
//
//GAS gas;
//CALC calc;
cout<<"Reading OXYGEN_HYDROGEN data"<<endl;
gas->MSP=8;
gas->MMRM=3;
gas->MMVM=3;
gas->MELE=1;
gas->MVIBL=40; //the maximum number of vibrational levels before a cumulative level reaches 1
//
gas->MEX=16;
gas->MMEX=3;
//
gas->MNSR=0;
//
ALLOCATE_GAS();
//
//species 1 is hydrogen H2
gas->SP[1][1]=2.92e-10;
gas->SP[2][1]=273.e00;
gas->SP[3][1]=0.67e00;
gas->SP[4][1]=1.e00;
gas->SP[5][1]=3.34e-27;
gas->SP[6][1]=0.e00;
gas->ISPR[1][1]=2;
gas->ISPR[2][1]=0;
gas->SPR[1][1]=5.e00;
gas->ISPV[1]=1; // the number of vibrational modes
gas->SPVM[1][1][1]=6159.e00; // the characteristic vibrational temperature
gas->SPVM[2][1][1]=20000.e00; //estimate
gas->SPVM[3][1][1]=2000.e00; //estimate
gas->SPVM[5][1][1]=1.0;
gas->ISPVM[1][1][1]=2;
gas->ISPVM[2][1][1]=2;
//species 2 is atomic hydrogen H
gas->SP[1][2]=2.5e-10; //estimate
gas->SP[2][2]=273.e00;
gas->SP[3][2]=0.8e00;
gas->SP[4][2]=1.e00;
gas->SP[5][2]=1.67e-27;
gas->SP[6][2]=3.62e-19;
gas->ISPR[1][2]=0;
gas->ISPV[2]=0;
//species 3 is oxygen O2
gas->SP[1][3]=4.07e-10;
gas->SP[2][3]=273.e00;
gas->SP[3][3]=0.77e00;
gas->SP[4][3]=1.e00;
gas->SP[5][3]=5.312e-26;
gas->SP[6][3]=0.e00;
gas->ISPR[1][3]=2;
gas->ISPR[2][3]=0;
gas->SPR[1][3]=5.e00;
gas->ISPV[3]=1; // the number of vibrational modes
gas->SPVM[1][1][3]=2256.e00; // the characteristic vibrational temperature
gas->SPVM[2][1][3]=18000.e00; //90000.D00 // a constant Zv, or the reference Zv
gas->SPVM[3][1][3]=2256.e00; // -1 for a constant Zv, or the reference temperature
gas->SPVM[5][1][3]=1.e00;
gas->ISPVM[1][1][3]=4;
gas->ISPVM[2][1][3]=4;
//species 4 is atomic oxygen O
gas->SP[1][4]=3.e-10; //estimate
gas->SP[2][4]=273.e00;
gas->SP[3][4]=0.8e00;
gas->SP[4][4]=1.e00;
gas->SP[5][4]=2.656e-26;
gas->SP[6][4]=4.099e-19;
gas->ISPR[1][4]=0;
gas->ISPV[4]=0;
//species 5 is hydroxy OH
gas->SP[1][5]=4.e-10; //estimate
gas->SP[2][5]=273.e00;
gas->SP[3][5]=0.75e00; //-estimate
gas->SP[4][5]=1.0e00;
gas->SP[5][5]=2.823e-26;
gas->SP[6][5]=6.204e-20;
gas->ISPR[1][5]=2;
gas->ISPR[2][5]=0;
gas->SPR[1][5]=5.e00;
gas->ISPV[5]=1;
gas->SPVM[1][1][5]=5360.e00;
gas->SPVM[2][1][5]=20000.e00; //estimate
gas->SPVM[3][1][5]=2500.e00; //estimate
gas->SPVM[5][1][5]=1.0e00;
gas->ISPVM[1][1][5]=2;
gas->ISPVM[2][1][5]=4;
//species 6 is water vapor H2O
gas->SP[1][6]=4.5e-10; //estimate
gas->SP[2][6]=273.e00;
gas->SP[3][6]=0.75e00 ; //-estimate
gas->SP[4][6]=1.0e00;
gas->SP[5][6]=2.99e-26;
gas->SP[6][6]=-4.015e-19;
gas->ISPR[1][6]=3;
gas->ISPR[2][6]=0;
gas->SPR[1][6]=5.e00;
gas->ISPV[6]=3;
gas->SPVM[1][1][6]=5261.e00; //symmetric stretch mode
gas->SPVM[2][1][6]=20000.e00; //estimate
gas->SPVM[3][1][6]=2500.e00; //estimate
gas->SPVM[5][1][6]=1.e00;
gas->SPVM[1][2][6]=2294.e00; //bend mode
gas->SPVM[2][2][6]=20000.e00; //estimate
gas->SPVM[3][2][6]=2500.e00; //estimate
gas->SPVM[5][2][6]=1.0e00;
gas->SPVM[1][3][6]=5432.e00; //asymmetric stretch mode
gas->SPVM[2][3][6]=20000.e00; //estimate
gas->SPVM[3][3][6]=2500.e00 ; //estimate
gas->SPVM[5][3][6]=1.e00;
gas->ISPVM[1][1][6]=2;
gas->ISPVM[2][1][6]=5;
gas->ISPVM[1][2][6]=2;
gas->ISPVM[2][2][6]=5;
gas->ISPVM[1][3][6]=2;
gas->ISPVM[2][3][6]=5;
//species 7 is hydroperoxy HO2
gas->SP[1][7]=5.5e-10; //estimate
gas->SP[2][7]=273.e00;
gas->SP[3][7]=0.75e00 ; //-estimate
gas->SP[4][7]=1.0e00;
gas->SP[5][7]=5.479e-26;
gas->SP[6][7]=2.04e-20;
gas->ISPR[1][7]=2; //assumes that HO2 is linear
gas->ISPR[2][7]=0;
gas->SPR[1][7]=5.e00;
gas->ISPV[7]=3;
gas->SPVM[1][1][7]=4950.e00;
gas->SPVM[2][1][7]=20000.e00; //estimate
gas->SPVM[3][1][7]=2500.e00 ; //estimate
gas->SPVM[5][1][7]=1.e00;
gas->SPVM[1][2][7]=2000.e00;
gas->SPVM[2][2][7]=20000.e00; //estimate
gas->SPVM[3][2][7]=2500.e00; //estimate
gas->SPVM[5][2][7]=1.e00;
gas->SPVM[1][3][7]=1580.e00;
gas->SPVM[2][3][7]=20000.e00; //estimate
gas->SPVM[3][3][7]=2500.e00; //estimate
gas->SPVM[5][3][7]=1.e00;
gas->ISPVM[1][1][7]=2;
gas->ISPVM[2][1][7]=3;
gas->ISPVM[1][2][7]=2;
gas->ISPVM[2][2][7]=3;
gas->ISPVM[1][3][7]=2;
gas->ISPVM[2][3][7]=3;
//Species 8 is argon
gas->SP[1][8]=4.17e-10;
gas->SP[2][8]=273.15;
gas->SP[3][8]=0.81 ;
gas->SP[4][8]=1.0;
gas->SP[5][8]=6.63e-26;
gas->SP[6][8]=0.e00;
gas->ISPR[1][8]=0;
gas->ISPV[8]=0;
//
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->ISPRC[i][j]=0;
}
}
//gas->ISPRC=0; //data is zero unless explicitly set
//
gas->ISPRC[4][4]=3; //O+O+M -> O2+M recombined species code for an O+O recombination
gas->ISPRK[4][4]=1;
gas->SPRC[1][4][4][1]=0.26e00;
gas->SPRC[2][4][4][1]=-1.3e00;
gas->SPRC[1][4][4][2]=0.29e00;
gas->SPRC[2][4][4][2]=-1.3e00;
gas->SPRC[1][4][4][3]=0.04e00;
gas->SPRC[2][4][4][3]=-1.5e00;
gas->SPRC[1][4][4][4]=0.1e00;
gas->SPRC[2][4][4][4]=-1.4e00;
gas->SPRC[1][4][4][5]=0.1e00;
gas->SPRC[2][4][4][5]=-1.4e00;
gas->SPRC[1][4][4][6]=0.1e00;
gas->SPRC[2][4][4][6]=-1.4e00;
gas->SPRC[1][4][4][7]=0.07e00;
gas->SPRC[2][4][4][7]=-1.5e00;
gas->SPRC[1][4][4][8]=0.07e00;
gas->SPRC[2][4][4][8]=-1.5e00;
gas->SPRT[1][4][4]=1000.e00;
gas->SPRT[2][4][4]=3000.e00;
//
gas->ISPRC[2][2]=1; //H+H+M -> H2+M
gas->ISPRK[2][2]=1;
gas->SPRC[1][2][2][1]=0.07e00;
gas->SPRC[2][2][2][1]=-2.e00;
gas->SPRC[1][2][2][2]=0.11e00;
gas->SPRC[2][2][2][2]=-2.2e00;
gas->SPRC[1][2][2][3]=0.052e00;
gas->SPRC[2][2][2][3]=-2.5e00;
gas->SPRC[1][2][2][4]=0.052e00;
gas->SPRC[2][2][2][4]=-2.5e00;
gas->SPRC[1][2][2][5]=0.052e00;
gas->SPRC[2][2][2][5]=-2.5e00;
gas->SPRC[1][2][2][6]=0.052e00;
gas->SPRC[2][2][2][6]=-2.5e00;
gas->SPRC[1][2][2][7]=0.052e00;
gas->SPRC[2][2][2][7]=-2.5e00;
gas->SPRC[1][2][2][8]=0.04e00;
    gas->SPRC[2][2][2][8]=-2.5e00; //exponent for third-body species 8 (argon), pairing with SPRC[1][2][2][8] above
gas->SPRT[1][2][2]=1000.e00;
gas->SPRT[2][2][2]=3000.e00;
//
    gas->ISPRC[2][4]=5; //H+O+M -> OH+M
gas->ISPRK[2][4]=1;
gas->SPRC[1][2][4][1]=0.15e00;
gas->SPRC[2][2][4][1]=-2.e00;
gas->SPRC[1][2][4][2]=0.04e00;
gas->SPRC[2][2][4][2]=-1.3e00;
gas->SPRC[1][2][4][3]=0.04e00;
gas->SPRC[2][2][4][3]=-1.3e00;
gas->SPRC[1][2][4][4]=0.04e00;
gas->SPRC[2][2][4][4]=-1.3e00;
gas->SPRC[1][2][4][5]=0.04e00;
gas->SPRC[2][2][4][5]=-1.3e00;
gas->SPRC[1][2][4][6]=0.21e00;
gas->SPRC[2][2][4][6]=-2.1e00;
gas->SPRC[1][2][4][7]=0.18e00;
gas->SPRC[2][2][4][7]=-2.3e00;
gas->SPRC[1][2][4][8]=0.16e00;
gas->SPRC[2][2][4][8]=-2.3e00;
gas->SPRT[1][2][4]=1000.e00;
gas->SPRT[2][2][4]=3000.e00;
//
gas->ISPRC[2][5]=6; //H+OH+M -> H2O+M
gas->ISPRK[2][5]=1;
gas->SPRC[1][2][5][1]=0.1e00;
gas->SPRC[2][2][5][1]=-2.0e00;
gas->SPRC[1][2][5][2]=0.1e00;
gas->SPRC[2][2][5][2]=-2.0e00;
gas->SPRC[1][2][5][3]=0.0025e00;
gas->SPRC[2][2][5][3]=-2.2e00;
gas->SPRC[1][2][5][4]=0.0025e00;
gas->SPRC[2][2][5][4]=-2.2e00;
gas->SPRC[1][2][5][5]=0.0025e00;
gas->SPRC[2][2][5][5]=-2.2e00;
gas->SPRC[1][2][5][6]=0.0015e00;
gas->SPRC[2][2][5][6]=-2.2e00;
gas->SPRC[1][2][5][7]=0.0027e00;
gas->SPRC[2][2][5][7]=-2.e00;
gas->SPRC[1][2][5][8]=0.0025e00;
gas->SPRC[2][2][5][8]=-2.e00;
gas->SPRT[1][2][5]=1000.e00;
gas->SPRT[2][2][5]=3000.e00;
//
    gas->ISPRC[2][3]=7; //H+O2+M -> HO2+M
gas->ISPRK[2][3]=1;
gas->SPRC[1][2][3][1]=0.0001e00;
gas->SPRC[2][2][3][1]=-1.7e00;
gas->SPRC[1][2][3][2]=0.0001e00;
gas->SPRC[2][2][3][2]=-1.7e00;
gas->SPRC[1][2][3][3]=0.00003e00;
gas->SPRC[2][2][3][3]=-1.5e00;
gas->SPRC[1][2][3][4]=0.00003e00;
gas->SPRC[2][2][3][4]=-1.7e00;
gas->SPRC[1][2][3][5]=0.00003e00;
gas->SPRC[2][2][3][5]=-1.7e00;
gas->SPRC[1][2][3][6]=0.00003e00;
gas->SPRC[2][2][3][6]=-1.7e00;
gas->SPRC[1][2][3][7]=0.000012e00;
gas->SPRC[2][2][3][7]=-1.7e00;
gas->SPRC[1][2][3][8]=0.00002e00;
gas->SPRC[2][2][3][8]=-1.7e00;
gas->SPRT[1][2][3]=1000.e00;
gas->SPRT[2][2][3]=3000.e00;
//
//set the exchange reaction data
// memset(gas->SPEX,0,sizeof(****gas->SPEX));//gas->SPEX=0.e00; //all activation energies and heats of reaction are zero unless set otherwise
for(int i=0;i<7;i++){
for(int j=0;j<gas->MMEX+1;j++){
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<gas->MSP+1;l++)
gas->SPEX[i][j][k][l]=0.e00;
}
}
}
//gas->ISPEX=0; // ISPEX is also zero unless set otherwise
for(int i=0;i<gas->MMEX+1;i++){
for(int j=0;j<8;j++){
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<gas->MSP+1;l++)
                    gas->ISPEX[i][j][k][l]=0;
}
}
}
//gas->NSPEX=0;
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->NSPEX[i][j]=0;
}
}
//set the number of exchange reactions for each species pair
gas->NSPEX[1][3]=1;
gas->NSPEX[2][7]=3;
gas->NSPEX[2][3]=1;
gas->NSPEX[4][5]=1;
gas->NSPEX[1][4]=1;
gas->NSPEX[2][5]=1;
gas->NSPEX[1][5]=1;
gas->NSPEX[2][6]=1;
gas->NSPEX[4][6]=2;
gas->NSPEX[5][5]=2;
gas->NSPEX[4][7]=1;
gas->NSPEX[3][5]=1;
//set the information on the chain reactions
//
//H2+O2 -> HO2+H
gas->ISPEX[1][1][1][3]=1;
gas->ISPEX[1][2][1][3]=3;
gas->ISPEX[1][3][1][3]=7;
gas->ISPEX[1][4][1][3]=2;
gas->ISPEX[1][5][1][3]=1;
gas->ISPEX[1][6][1][3]=1;
gas->SPEX[6][1][1][3]=0.e00;
gas->NEX[1][1][3]=1;
//
    //HO2+H -> H2+O2
gas->ISPEX[1][1][2][7]=7;
gas->ISPEX[1][2][2][7]=2;
gas->ISPEX[1][3][2][7]=1;
gas->ISPEX[1][4][2][7]=3;
gas->ISPEX[1][5][2][7]=1;
gas->ISPEX[1][6][2][7]=1;
gas->ISPEX[1][7][2][7]=1;
    //HO2 is H-O-O so that not all vibrational modes contribute to this reaction, but the numbers here are guesses//
gas->SPEX[1][1][2][7]=20.e00;
gas->SPEX[2][1][2][7]=0.4e00;
gas->SPEX[4][1][2][7]=2000.e00;
gas->SPEX[5][1][2][7]=3000.e00;
gas->SPEX[6][1][2][7]=0.e00;
gas->NEX[1][2][7]=2;
//
//O2+H -> OH+O
gas->ISPEX[1][1][2][3]=3;
gas->ISPEX[1][2][2][3]=2;
gas->ISPEX[1][3][2][3]=5;
gas->ISPEX[1][4][2][3]=4;
gas->ISPEX[1][5][2][3]=1;
gas->ISPEX[1][6][2][3]=1;
gas->SPEX[6][1][2][3]=0.e00;
gas->NEX[1][2][3]=3;
//
//OH+O -> O2+H
gas->ISPEX[1][1][4][5]=5;
gas->ISPEX[1][2][4][5]=4;
gas->ISPEX[1][3][4][5]=3;
gas->ISPEX[1][4][4][5]=2;
gas->ISPEX[1][5][4][5]=1;
gas->ISPEX[1][6][4][5]=1;
gas->ISPEX[1][7][4][5]=1;
gas->SPEX[1][1][4][5]=0.65e00;
gas->SPEX[2][1][4][5]=-0.26;
gas->SPEX[4][1][4][5]=2000.e00;
gas->SPEX[5][1][4][5]=3000.e00;
gas->SPEX[6][1][4][5]=0.e00;
gas->NEX[1][4][5]=4;
//
//H2+O -> OH+H
gas->ISPEX[1][1][1][4]=1;
gas->ISPEX[1][2][1][4]=4;
gas->ISPEX[1][3][1][4]=5;
gas->ISPEX[1][4][1][4]=2;
gas->ISPEX[1][5][1][4]=1;
gas->ISPEX[1][6][1][4]=1;
gas->SPEX[6][1][1][4]=0.e00;
gas->NEX[1][1][4]=5;
//
//OH+H -> H2+O
gas->ISPEX[1][1][2][5]=5;
gas->ISPEX[1][2][2][5]=2;
gas->ISPEX[1][3][2][5]=1;
gas->ISPEX[1][4][2][5]=4;
gas->ISPEX[1][5][2][5]=1;
gas->ISPEX[1][6][2][5]=1;
gas->ISPEX[1][7][2][5]=1;
gas->SPEX[1][1][2][5]=0.5e00;
gas->SPEX[2][1][2][5]=-0.2e00;
gas->SPEX[4][1][2][5]=2000.e00;
gas->SPEX[5][1][2][5]=3000.e00;
gas->SPEX[6][1][2][5]=0.e00;
gas->NEX[1][2][5]=6;
//
    //H2O+H -> OH+H2
gas->ISPEX[1][1][2][6]=6;
gas->ISPEX[1][2][2][6]=2;
gas->ISPEX[1][3][2][6]=5;
gas->ISPEX[1][4][2][6]=1;
gas->ISPEX[1][5][2][6]=1;
gas->ISPEX[1][6][2][6]=1;
gas->SPEX[6][1][2][6]=2.0e-19;
gas->NEX[1][2][6]=7;
//OH+H2 -> H2O+H
gas->ISPEX[1][1][1][5]=5;
gas->ISPEX[1][2][1][5]=1;
gas->ISPEX[1][3][1][5]=6;
gas->ISPEX[1][4][1][5]=2;
gas->ISPEX[1][5][1][5]=1;
gas->ISPEX[1][6][1][5]=1;
gas->ISPEX[1][7][1][5]=1;
gas->SPEX[1][1][1][5]=0.5;
gas->SPEX[2][1][1][5]=-0.2;
gas->SPEX[4][1][1][5]=2000.e00;
gas->SPEX[5][1][1][5]=3000.e00;
gas->SPEX[6][1][1][5]=0.e00;
gas->NEX[1][1][5]=8;
//
//H2O+O -> OH+OH
gas->ISPEX[1][1][4][6]=6;
gas->ISPEX[1][2][4][6]=4;
gas->ISPEX[1][3][4][6]=5;
gas->ISPEX[1][4][4][6]=5;
gas->ISPEX[1][5][4][6]=1;
gas->ISPEX[1][6][4][6]=1;
gas->SPEX[6][1][4][6]=0.e00;
gas->NEX[1][4][6]=9;
//
    //OH+OH -> H2O+O
gas->ISPEX[1][1][5][5]=5;
gas->ISPEX[1][2][5][5]=5;
gas->ISPEX[1][3][5][5]=6;
gas->ISPEX[1][4][5][5]=4;
gas->ISPEX[1][5][5][5]=1;
gas->ISPEX[1][6][5][5]=1;
gas->ISPEX[1][7][5][5]=1;
gas->SPEX[1][1][5][5]=0.35;
gas->SPEX[2][1][5][5]=-0.2 ;
gas->SPEX[4][1][5][5]=2000.e00;
gas->SPEX[5][1][5][5]=3000.e00;
gas->SPEX[6][1][5][5]=0.e00;
gas->NEX[1][5][5]=10;
//
//OH+OH -> HO2+H
//
gas->ISPEX[2][1][5][5]=5;
gas->ISPEX[2][2][5][5]=5;
gas->ISPEX[2][3][5][5]=7;
gas->ISPEX[2][4][5][5]=2;
gas->ISPEX[2][5][5][5]=1;
gas->ISPEX[2][6][5][5]=1;
gas->SPEX[6][2][5][5]=0.e00;
gas->NEX[2][5][5]=11;
//
    //HO2+H -> OH+OH
gas->ISPEX[2][1][2][7]=7;
gas->ISPEX[2][2][2][7]=2;
gas->ISPEX[2][3][2][7]=5;
gas->ISPEX[2][4][2][7]=5;
gas->ISPEX[2][5][2][7]=1;
gas->ISPEX[2][6][2][7]=1;
gas->ISPEX[2][7][2][7]=1;
gas->SPEX[1][2][2][7]=120.e00;
gas->SPEX[2][2][2][7]=-0.05e00;
gas->SPEX[4][2][2][7]=2000.e00;
gas->SPEX[5][2][2][7]=3000.e00;
gas->SPEX[6][2][2][7]=0.e00;
gas->NEX[2][2][7]=12;
//
//H2O+O -> HO2+H
//
gas->ISPEX[2][1][4][6]=6;
gas->ISPEX[2][2][4][6]=4;
gas->ISPEX[2][3][4][6]=7;
gas->ISPEX[2][4][4][6]=2;
gas->ISPEX[2][5][4][6]=1;
gas->ISPEX[2][6][4][6]=1;
gas->SPEX[6][2][4][6]=0.e00;
gas->NEX[2][4][6]=13;
//
    //HO2+H -> H2O+O
//
gas->ISPEX[3][1][2][7]=7;
gas->ISPEX[3][2][2][7]=2;
gas->ISPEX[3][3][2][7]=6;
gas->ISPEX[3][4][2][7]=4;
gas->ISPEX[3][5][2][7]=1;
gas->ISPEX[3][6][2][7]=1;
gas->ISPEX[3][7][2][7]=1;
gas->SPEX[1][3][2][7]=40.e00;
gas->SPEX[2][3][2][7]=-1.e00;
gas->SPEX[4][3][2][7]=2000.e00;
gas->SPEX[5][3][2][7]=3000.e00;
gas->SPEX[6][3][2][7]=0.e00;
gas->NEX[3][2][7]=14;
//
//OH+O2 -> HO2+O
//
gas->ISPEX[1][1][3][5]=5;
gas->ISPEX[1][2][3][5]=3;
gas->ISPEX[1][3][3][5]=7;
gas->ISPEX[1][4][3][5]=4;
gas->ISPEX[1][5][3][5]=1;
gas->ISPEX[1][6][3][5]=1;
gas->SPEX[6][1][3][5]=0.e00;
gas->NEX[1][3][5]=15;
//
    //HO2+O -> OH+O2
//
gas->ISPEX[1][1][4][7]=7;
gas->ISPEX[1][2][4][7]=4;
gas->ISPEX[1][3][4][7]=5;
gas->ISPEX[1][4][4][7]=3;
gas->ISPEX[1][5][4][7]=1;
gas->ISPEX[1][6][4][7]=1;
gas->ISPEX[1][7][4][7]=1;
gas->SPEX[1][1][4][7]=100.e00;
gas->SPEX[2][1][4][7]=0.15e00;
gas->SPEX[4][1][4][7]=2000.e00;
gas->SPEX[5][1][4][7]=3000.e00;
gas->SPEX[6][1][4][7]=0.e00;
gas->NEX[1][4][7]=16;
//
DERIVED_GAS_DATA();
//
cout<<"OXYGEN_HYDROGEN data done"<<endl;
return;
}
//***************************************************************************
//*************************END OF GAS DATABASE*******************************
//***************************************************************************
//
void DERIVED_GAS_DATA()
{
//
//GAS gas;
//CALC calc;
int I,II,J,JJ,K,L,M,MM,N,JMAX,MOLSP,MOLOF,NSTEP,IMAX;
double A,B,BB,C,X,T,CUR,EAD,TVD,ZVT,ERD,PETD,DETD,PINT,ETD,SUMD,VAL;
double **BFRAC,**TOT;
double ****VRRD;
double *****VRREX;
//
//VRRD(1,L,M,K) dissociation rate coefficient to species L,M for vibrational level K at 5,000 K
//VRRD(2,L,M,K) similar for 15,000 K
//VRREX(1,J,L,M,K) Jth exchange rate coefficient to species L,M for vibrational level K at 1,000 K
//VRREX(2,J,L,M,K) similar for 3,000 K
//BFRAC(2,J) Boltzmann fraction
//JMAX imax-1
//T temperature
//CUR sum of level resolved rates
//
VRRD = new double ***[3];
for (int i = 0; i < 3; ++i)
{
VRRD[i] = new double **[gas->MSP+1];
for (int j = 0; j < gas->MSP+1; ++j)
{
VRRD[i][j] = new double *[gas->MSP+1];
for(int k=0; k<gas->MSP+1; ++k)
VRRD[i][j][k]=new double [gas->MVIBL+1];
}
}
BFRAC = new double*[gas->MVIBL+1];
for(int i =0; i< (gas->MVIBL+1); ++i)
BFRAC[i] = new double[3];
VRREX = new double ****[3];
for (int i = 0; i < 3; ++i)
{
VRREX[i] = new double ***[gas->MMEX+1];
for (int j = 0; j < gas->MMEX+1; ++j)
{
VRREX[i][j] = new double **[gas->MSP+1];
for(int k=0; k<gas->MSP+1; ++k)
{
VRREX[i][j][k]=new double *[gas->MSP+1];
for(int l=0; l<gas->MSP+1; ++l)
VRREX[i][j][k][l]= new double[gas->MVIBL+1];
}
}
}
TOT = new double*[gas->MVIBL+1];
for(int i =0; i< (gas->MVIBL+1); ++i)
TOT[i] = new double[3];
// ALLOCATE (VRRD(2,MSP,MSP,0:MVIBL),BFRAC(0:MVIBL,2),VRREX(2,MMEX,MSP,MSP,0:MVIBL),TOT(0:MVIBL,2),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT ALLOCATE VIB. RES. DISS. RATES',ERROR
// END IF
//
cout<<"Setting derived gas data"<<endl;
//copy the L,M data that has been specified for L < M so that it applies also for M>L
for(L=1;L<=gas->MSP;L++){
for(M=1;M<=gas->MSP;M++){
if(L > M){
gas->NSPEX[L][M]=gas->NSPEX[M][L];
gas->ISPRC[L][M]=gas->ISPRC[M][L];
gas->ISPRK[L][M]=gas->ISPRK[M][L];
for(K=1;K<=gas->MSP;K++){
gas->SPRT[1][L][M]=gas->SPRT[1][M][L];
gas->SPRT[2][L][M]=gas->SPRT[2][M][L];
gas->SPRC[1][L][M][K]=gas->SPRC[1][M][L][K];
gas->SPRC[2][L][M][K]=gas->SPRC[2][M][L][K];
}
for(K=1;K<=gas->MMEX;K++){
gas->NEX[K][L][M]=gas->NEX[K][M][L];
for(J=1;J<=6;J++){
gas->SPEX[J][K][L][M]=gas->SPEX[J][K][M][L];
}
for(J=1;J<=7;J++){
gas->ISPEX[K][J][L][M]=gas->ISPEX[K][J][M][L];
}
}
}
}
}
//
if(gas->MMVM > 0){
//set the characteristic dissociation temperatures
for(L=1;L<=gas->MSP;L++){
if(gas->ISPV[L] > 0){
for(K=1;K<=gas->ISPV[L];K++)
{
I=gas->ISPVM[1][K][L];
J=gas->ISPVM[2][K][L];
gas->SPVM[4][K][L]=(gas->SP[6][I]+gas->SP[6][J]-gas->SP[6][L])/BOLTZ;
//WRITE (9,*) 'Char. Diss temp of species',L,' is',SPVM(4,K,L)
file_9<<"Char. Diss temp of species "<<L<<" is "<<gas->SPVM[4][K][L]<<endl;
}
}
}
}
//
if(gas->MMEX > 0){
//set the heats of reaction of the exchange and chain reactions
for(L=1;L<=gas->MSP;L++){
for(M=1;M<=gas->MSP;M++){
for(J=1;J<=gas->MMEX;J++){
if((gas->ISPEX[J][3][L][M]> 0) && (gas->ISPEX[J][4][L][M]>0) && (gas->ISPEX[J][1][L][M]>0) && (gas->ISPEX[J][2][L][M]>0)){
gas->SPEX[3][J][L][M]=gas->SP[6][gas->ISPEX[J][1][L][M]]+gas->SP[6][gas->ISPEX[J][2][L][M]]-gas->SP[6][gas->ISPEX[J][3][L][M]]-gas->SP[6][gas->ISPEX[J][4][L][M]];
// WRITE (9,*) 'Reaction',NEX(J,L,M),' heat of reaction',SPEX(3,J,L,M)
file_9<<"Reaction "<<gas->NEX[J][L][M]<<" heat of reaction"<<gas->SPEX[3][J][L][M]<<endl;
}
}
}
}
}
//
if(gas->MELE > 1){
//set the electronic cross-section ratios to a mean electronic relaxation collision number
//(equipartition is not achieved unless there is a single number)
for(L=1;L<=gas->MSP;L++){
A=0.e00;
for(K=1;K<=gas->NELL[L];K++){
A=A+gas->QELC[3][K][L];
}
gas->QELC[3][1][L]=A/double(gas->NELL[L]);
}
}
//
//set the cumulative distributions of the post-recombination vibrational distributions for establishment of detailed balance
for(L=1;L<=gas->MSP;L++){
for(M=1;M<=gas->MSP;M++){
if(gas->ISPRC[L][M] > 0){
N=gas->ISPRC[L][M]; //recombined species
K=gas->ISPRK[L][M]; //relevant vibrational mode
//WRITE (9,*) 'SPECIES',L,M,' RECOMBINE TO',N
file_9<<"SPECIES "<<L<<" "<<M<<" RECOMBINE TO"<<N<<endl;
JMAX=gas->SPVM[4][K][N]/gas->SPVM[1][K][N];
if(JMAX > gas->MVIBL){
cout<<" The variable MVIBL="<<gas->MVIBL<<" in the gas database must be increased to"<<JMAX<<endl;
cout<<"Enter 0 ENTER to stop";
cin>> A;
return ;
}
A=2.5e00-gas->SP[3][N];
for(I=1;I<=2;I++){
if(I == 1) T=gas->SPRT[1][L][M];
if(I == 2) T=gas->SPRT[2][L][M];
//WRITE (9,*) 'TEMPERATURE',T
file_9<<"TEMPERATURE "<<T<<endl;
CUR=0.e00;
for(J=0;J<=JMAX;J++){
X=double(JMAX+1-J)*gas->SPVM[1][K][N]/T;
CQAX(A,X,B);
VRRD[I][L][M][J]=B*exp(-double(J)*gas->SPVM[1][K][N]/T);
CUR=CUR+VRRD[I][L][M][J];
}
B=0.e00;
for(J=0;J<=JMAX;J++){
B=B+VRRD[I][L][M][J]/CUR;
gas->SPRP[I][L][M][J]=B;
//WRITE (9,*) 'CDF level dissoc',J,SPRP(I,L,M,J)
file_9<< "CDF level dissoc "<<J<<" "<<gas->SPRP[I][L][M][J];
}
}
}
}
}
//
//READ (*,*) //optionally pause program to check cumulative distributions for exchange and chain reactions
//
//set the cumulative distributions of the post-reverse vibrational distributions for establishment of detailed balance
for(L=1;L<=gas->MSP;L++){
for(M=1;M<=gas->MSP;M++){
if(gas->NSPEX[L][M] > 0){
for(K=1;K<=gas->NSPEX[L][M];K++){
if(gas->SPEX[3][K][L][M] > 0.e00){ //exothermic (reverse) exchange reaction
//L,M are the species in the reverse reaction, E_a of forward reaction is SPEX(3,K,L,M)
//WRITE (9,*) 'SPECIES',L,M,' REVERSE REACTION'
file_9<<"SPECIES "<<L<<" "<<M<<" REVERSE REACTION"<<endl;
                    MOLSP=gas->ISPEX[K][3][L][M]; //molecule that splits in the forward reaction
MOLOF=gas->ISPEX[K][4][L][M];
JMAX=(gas->SPEX[3][K][L][M]+gas->SPEX[6][K][MOLSP][MOLOF])/(BOLTZ*gas->SPVM[1][gas->ISPEX[K][5][L][M]][MOLSP])+15; //should always be less than the JMAX set by dissociation reactions
for(I=1;I<=2;I++){
if(I == 1) T=gas->SPEX[4][K][L][M];
if(I == 2) T=gas->SPEX[5][K][L][M];
for(J=0;J<=JMAX;J++){
EAD=(gas->SPEX[3][K][L][M]+gas->SPEX[6][K][MOLSP][MOLOF])/(BOLTZ*T);
TVD=gas->SPVM[1][gas->ISPEX[K][5][L][M]][MOLSP]/T;
ZVT=1.e00/(1.e00-exp(-TVD));
C=ZVT/(tgamma(2.5e00-gas->SP[3][MOLSP])*exp(-EAD)); //coefficient of integral
ERD=EAD-double(J)*TVD;
if(ERD < 0.e00) ERD=0.e00;
PETD=ERD;
DETD=0.01e00;
PINT=0.e00; //progressive value of integral
NSTEP=0;
A=1.e00;
while(A > 1.e-10){
NSTEP=NSTEP+1;
ETD=PETD+0.5e00*DETD;
SUMD=0.e00; //normalizing sum in the denominator
IMAX=ETD/TVD+J;
for(II=0;II<=IMAX;II++){
SUMD=SUMD+pow((1.e00-double(II)*TVD/(ETD+double(J)*TVD)),(1.5e00-gas->SP[3][MOLSP]));
}
VAL=(pow((ETD*(1.e00-EAD/(ETD+double(J)*TVD))),(1.5e00-gas->SP[3][MOLSP]))/SUMD)*exp(-ETD);
PINT=PINT+VAL*DETD;
A=VAL/PINT;
PETD=ETD+0.5e00*DETD;
}
VRREX[I][K][L][M][J]=C*PINT;
// WRITE (*,*) 'Level ratio exch',I,J,VRREX(I,K,L,M,J)
}
}
//
                    //memset(TOT,0.e00,sizeof(**TOT));//TOT=0.e00;
                    for(int i=0;i<gas->MVIBL+1;i++){
                        for(int j=0;j<3;j++){ //TOT(0:MVIBL,2) rows hold only 3 entries
                            TOT[i][j]=0;
                        }
                    }
for(I=1;I<=2;I++){
if(I == 1) T=gas->SPEX[4][K][L][M];
if(I == 2) T=gas->SPEX[5][K][L][M];
for(J=0;J<=JMAX;J++){
TVD=gas->SPVM[1][gas->ISPEX[K][5][L][M]][MOLSP]/T;
ZVT=1.e00/(1.e00-exp(-TVD));
BFRAC[J][I]=exp(-J*gas->SPVM[1][gas->ISPEX[K][5][L][M]][MOLSP]/T)/ZVT; //Boltzmann fraction
VRREX[I][K][L][M][J]=VRREX[I][K][L][M][J]*BFRAC[J][I];
// WRITE (*,*) 'Contribution',I,J,VRREX(I,K,L,M,J)
for(MM=0;MM<=J;MM++)
TOT[J][I]=TOT[J][I]+VRREX[I][K][L][M][MM];
}
}
//
for(I=1;I<=2;I++){
for(J=0;J<=JMAX;J++){
gas->SPREX[I][K][L][M][J]=TOT[J][I];
if(J == JMAX) gas->SPREX[I][K][L][M][J]=1.e00;
//WRITE (9,*) 'Cumulative',I,J,SPREX(I,K,L,M,J)
file_9<<"Cumulative "<<I<<" "<<J<<" "<<gas->SPREX[I][K][L][M][J];
}
}
}
}
gas->NSLEV=0;
//memset(gas->SLER,0.e00,sizeof(*gas->SLER));//gas->SLER=0.e00;
for(int i=0;i<gas->MSP+1;i++)
gas->SLER[i]=0.e00;
}
}
}
//
    //READ (*,*) //optionally pause program to check cumulative distributions for exchange and chain reactions
return;
}
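//
//The cumulative distributions built above (SPRP and SPREX, with the final level forced to 1)
//are meant to be sampled by comparing a uniform random fraction against successive cumulative
//values. A minimal sketch of that selection on a generic cumulative array; the function name
//and the RANF argument are illustrative assumptions, and the production sampling is done
//elsewhere in the program.
int SAMPLE_CUMULATIVE_LEVEL(double RANF, double *CDF, int JMAX)
{
    for(int J=0;J<=JMAX;J++){
        if(RANF < CDF[J]) return J;  //first level whose cumulative value exceeds the random fraction
    }
    return JMAX;                     //CDF[JMAX] should equal 1, so this is only a safeguard
}
//e.g. int LEV=SAMPLE_CUMULATIVE_LEVEL(ranf,gas->SPRP[I][L][M],JMAX); //ranf is a uniform (0,1) sample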
void READ_DATA()
{
//CALC calc;
//MOLECS molecs;
//GAS gas;
//OUTPUT output;
//GEOM_1D geom;
fstream file_3;
fstream file_4;
int NVERD,MVERD,N,K;
if(calc->ICLASS==0)
{
cout<<"Reading the data file DS0D.DAT"<<endl;
file_4.open("DS0D.DAT", ios::in);
file_3.open("DS0D.TXT", ios::out);
file_3<<"Data summary for program DSMC"<<endl;
// OPEN (4,FILE='DS0D.DAT')
// OPEN (3,FILE='DS0D.TXT')
// WRITE (3,*) 'Data summary for program DSMC'
}
if(calc->ICLASS==1)
{
cout<<"Reading the data file DS1D.DAT"<<endl;
file_4.open("DS1D.DAT", ios::in);
file_3.open("DS1D.TXT", ios::out );
file_3<<"Data summary for program DSMC"<<endl;
// OPEN (4,FILE='DS1D.DAT')
// OPEN (3,FILE='DS1D.TXT')
// WRITE (3,*) 'Data summary for program DSMC'
}
//the following items are common to all classes of flow
file_4>>NVERD;
file_3<<"The n in version number n.m is "<<NVERD<<endl;
file_4>>MVERD;
file_3<<"The m in version number n.m is "<<MVERD<<endl;
file_4>>calc->IMEG;
file_3<<"The approximate number of megabytes for the calculation is "<<calc->IMEG<<endl;
file_4>>gas->IGAS;
file_3<<gas->IGAS<<endl;//gas->IGAS=1;
// READ (4,*) NVERD
// WRITE (3,*) 'The n in version number n.m is',NVERD
// READ (4,*) MVERD
// WRITE (3,*) 'The m in version number n.m is',MVERD
// READ (4,*) IMEG //calc->IMEG
// WRITE (3,*) 'The approximate number of megabytes for the calculation is',IMEG //calc->IMEG
// READ (4,*) IGAS //gas->IGAS
// WRITE (3,*) IGAS //gas->IGAS
if(gas->IGAS==1)
{
file_3<<" Hard sphere gas "<<endl;
// WRITE (3,*) 'Hard sphere gas'
HARD_SPHERE();
}
if(gas->IGAS==2)
{
file_3<<"Argon "<<endl;
// WRITE (3,*) 'Argon'
ARGON();
}
if(gas->IGAS==3)
{
file_3<<"Ideal nitrogen"<<endl;
// WRITE (3,*) 'Ideal nitrogen'
IDEAL_NITROGEN();
}
if(gas->IGAS==4)
{
file_3<<"Real oxygen "<<endl;
// WRITE (3,*) 'Real oxygen'
REAL_OXYGEN();
}
if(gas->IGAS==5)
{
file_3<<"Ideal air "<<endl;
        // WRITE (3,*) 'Ideal air'
IDEAL_AIR();
}
if(gas->IGAS==6)
{
file_3<<"Real air @ 7.5 km/s "<<endl;
        // WRITE (3,*) 'Real air @ 7.5 km/s'
REAL_AIR();
}
if(gas->IGAS==7)
{
file_3<<"Helium-argon-xenon mixture "<<endl;
// WRITE (3,*) 'Helium-argon-xenon mixture'
HELIUM_ARGON_XENON();
}
if(gas->IGAS==8)
{
file_3<<"Oxygen-hydrogen "<<endl;
        // WRITE (3,*) 'Oxygen-hydrogen'
OXYGEN_HYDROGEN();
}
file_3<<"The gas properties are:- "<<endl;
file_4>>gas->FND[1];
file_3<<"The stream number density is "<<gas->FND[1]<<endl;
file_4>>gas->FTMP[1];
file_3<<"The stream temperature is "<<gas->FTMP[1]<<endl;
// WRITE (3,*) 'The gas properties are:-'
// READ (4,*) FND(1) //gas->FND[1]
// WRITE (3,*) ' The stream number density is',FND(1) ////gas->FND[1]
// READ (4,*) FTMP(1) //gas->FTMP[1]
// WRITE (3,*) ' The stream temperature is',FTMP(1) //gas->FTMP[1]
if(gas->MMVM>0)
{
file_4>>gas->FVTMP[1];
file_3<<"The stream vibrational and electronic temperature is "<<gas->FVTMP[1]<<endl;
// READ (4,*) FVTMP(1) //gas->FVTMP;
// WRITE (3,*) ' The stream vibrational and electronic temperature is',FVTMP(1) //gas->FVTMP[1]
}
if(calc->ICLASS==1)
{
file_4>>gas->VFX[1];
file_3<<"The stream velocity in the x direction is "<<gas->VFX[1]<<endl;
file_4>>gas->VFY[1];
file_3<<"The stream velocity in the y direction is "<<gas->VFY[1]<<endl;
// READ (4,*) VFX(1) //gas->VFX[1]
// WRITE (3,*) ' The stream velocity in the x direction is',VFX(1) //gas->VFX[1]
// READ (4,*) VFY(1) ////gas->VFY[1]
// WRITE (3,*) ' The stream velocity in the y direction is',VFY(1) ////gas->VFY[1]
}
if(gas->MSP>1)
{
for(N=1;N<=gas->MSP;N++)
{
file_4>>gas->FSP[N][1];
file_3<<" The fraction of species "<<N<<" is "<<gas->FSP[N][1]<<endl;
// READ (4,*) FSP(N,1) //gas->FSP[N][1]
// WRITE (3,*) ' The fraction of species',N,' is',FSP(N,1) //gas->FSP[N][1]
}
}
else
{
gas->FSP[1][1]=1.0; //simple gas
}
if(calc->ICLASS==0){
// !--a homogeneous gas case is calculated as a one-dimensional flow with a single sampling cell
// !--set the items that are required in the DS1D.DAT specification
geom->IFX=0;
geom->JFX=1;
geom->XB[1]=0.e00;
geom->XB[2]=0.0001e00*1.e25/gas->FND[1];
geom->ITYPE[1]=1;
geom->ITYPE[2]=1;
gas->VFX[1]=0.e00;
calc->IGS=1;
calc->ISECS=0;
calc->IREM=0;
calc->MOLSC=10000*calc->IMEG; //a single sampling cell
}
if(calc->ICLASS==1)
{
file_4>>geom->IFX;
// READ (4,*) IFX //geom->IFX
if(geom->IFX==0)
file_3<<"Plane Flow"<<endl;
// WRITE (3,*) 'Plane flow'
if(geom->IFX==1)
file_3<<"Cylindrical flow"<<endl;
// WRITE (3,*) 'Cylindrical flow'
if(geom->IFX==2)
file_3<<"Spherical flow"<<endl;
// WRITE (3,*) 'Spherical flow'
geom->JFX=geom->IFX+1;
file_4>>geom->XB[1];
// READ (4,*) XB(1) //geom->XB[1]
file_3<<"The minimum x coordinate is "<<geom->XB[1]<<endl;
// WRITE (3,*) 'The minimum x coordinate is',XB(1) //geom->XB[1]
file_4>>geom->ITYPE[1];
// READ (4,*) ITYPE(1) //geom->ITYPE[1]
if(geom->ITYPE[1]==0)
file_3<<"The minimum x coordinate is a stream boundary"<<endl;
// WRITE (3,*) 'The minimum x coordinate is a stream boundary'
if(geom->ITYPE[1]==1)
file_3<<"The minimum x coordinate is a plane of symmetry"<<endl;
// WRITE (3,*) 'The minimum x coordinate is a plane of symmetry'
if(geom->ITYPE[1]==2)
file_3<<"The minimum x coordinate is a solid surface"<<endl;
// WRITE (3,*) 'The minimum x coordinate is a solid surface'
if(geom->ITYPE[1]==3)
file_3<<"The minimum x coordinate is a vacuum"<<endl;
// WRITE (3,*) 'The minimum x coordinate is a vacuum'
if(geom->ITYPE[1]==4)
file_3<<"The minimum x coordinate is an axis or center"<<endl;
// WRITE (3,*) 'The minimum x coordinate is an axis or center'
if(geom->ITYPE[1]==2)
{
file_3<<"The minimum x boundary is a surface with the following properties"<<endl;
file_4>>gas->TSURF[1];
file_3<<"The temperature of the surface is "<<gas->TSURF[1]<<endl;
file_4>>gas->FSPEC[1];
file_3<<"The fraction of specular reflection is "<<gas->FSPEC[1]<<endl;
file_4>>gas->VSURF[1];
file_3<<"The velocity in the y direction of this surface is "<<gas->VSURF[1];
// WRITE (3,*) 'The minimum x boundary is a surface with the following properties'
// READ (4,*) TSURF(1) //gas->TSURF[1]
// WRITE (3,*) ' The temperature of the surface is',TSURF(1) //gas->TSURF[1]
// READ (4,*) FSPEC(1) //gas->FSPEC[1]
// WRITE (3,*) ' The fraction of specular reflection is',FSPEC(1) //gas->FSPEC[1]
// READ (4,*) VSURF(1) //gas->VSURF[1]
// WRITE (3,*) ' The velocity in the y direction of this surface is',VSURF(1) //gas->VSURF[1]
}
file_4>>geom->XB[2];
file_3<<"The maximum x coordinate is "<<geom->XB[2]<<endl;
file_4>>geom->ITYPE[2];
// READ (4,*) XB(2) //geom->XB[2]
// WRITE (3,*) 'The maximum x coordinate is',XB(2)//geom->XB[2]
// READ (4,*) ITYPE(2)//geom->ITYPE[2]
if(geom->ITYPE[2]==0)
file_3<<"The mmaximum x coordinate is a stream boundary"<<endl;
// WRITE (3,*) 'The mmaximum x coordinate is a stream boundary'
if(geom->ITYPE[2]==1)
file_3<<"The maximum x coordinate is a plane of symmetry"<<endl;
// WRITE (3,*) 'The maximum x coordinate is a plane of symmetry'
if(geom->ITYPE[2]==2)
file_3<<"The maximum x coordinate is a solid surface"<<endl;
// WRITE (3,*) 'The maximum x coordinate is a solid surface'
if(geom->ITYPE[2]==3)
file_3<<"The maximum x coordinate is a vacuum"<<endl;
// WRITE (3,*) 'The maximum x coordinate is a vacuum'
calc->ICN=0;
if(geom->ITYPE[2]==4)
{
file_3<<"The maximum x coordinate is a stream boundary with a fixed number of simulated molecules"<<endl;
// WRITE (3,*) 'The maximum x coordinate is a stream boundary with a fixed number of simulated molecules'
if(gas->MSP==1)
calc->ICN=1;
}
if(geom->ITYPE[2]==2)
{
file_3<<"The maximum x boundary is a surface with the following properties"<<endl;
file_4>>gas->TSURF[1];
file_3<<"The temperature of the surface is "<<gas->TSURF[1]<<endl;
file_4>>gas->FSPEC[1];
file_3<<"The fraction of specular reflection is "<<gas->FSPEC[1]<<endl;
file_4>>gas->VSURF[1];
file_3<<"The velocity in the y direction of this surface is "<<gas->VSURF[1]<<endl;
// WRITE (3,*) 'The maximum x boundary is a surface with the following properties'
// READ (4,*) TSURF(1) //gas->TSURF[1]
// WRITE (3,*) ' The temperature of the surface is',TSURF(1) //gas->TSURF[1]
// READ (4,*) FSPEC(1) //gas->FSPEC[1]
// WRITE (3,*) ' The fraction of specular reflection is',FSPEC(1) //gas->FSPEC[1]
// READ (4,*) VSURF(1) //gas->VSURF[1]
// WRITE (3,*) ' The velocity in the y direction of this surface is',VSURF(1) //gas->VSURF[1]
}
if(geom->IFX>0)
{
file_4>>geom->IWF;
            // READ (4,*) IWF //geom->IWF
if(geom->IWF==0)
file_3<<"There are no radial weighting factors"<<endl;
// WRITE (3,*) 'There are no radial weighting factors'
if(geom->IWF==1)
file_3<<"There are radial weighting factors"<<endl;
// WRITE (3,*) 'There are radial weighting factors'
if(geom->IWF==1)
{
file_4>>geom->WFM;
file_3<<"The maximum value of the weighting factor is "<<geom->WFM<<endl;
// READ (4,*) WFM //geom->WFM
// WRITE (3,*) 'The maximum value of the weighting factor is ',WFM //geom->WFM
geom->WFM=(geom->WFM-1)/geom->XB[2];
}
}
file_4>>calc->IGS;
// READ (4,*) IGS //calc->IGS
if(calc->IGS==0)
file_3<<"The flowfield is initially a vacuum "<<endl;
// WRITE (3,*) 'The flowfield is initially a vacuum'
if(calc->IGS==1)
file_3<<"The flowfield is initially the stream(s) or reference gas"<<endl;
// WRITE (3,*) 'The flowfield is initially the stream(s) or reference gas'
file_4>>calc->ISECS;
// READ (4,*) ISECS //calc->ISECS
if(calc->ISECS==0)
file_3<<"There is no secondary stream initially at x > 0"<<endl;
// WRITE (3,*) 'There is no secondary stream initially at x > 0'
if(calc->ISECS==1 && geom->IFX==0)
file_3<<"There is a secondary stream applied initially at x = 0 (XB(2) must be > 0)"<<endl;
// WRITE (3,*) 'There is a secondary stream applied initially at x = 0 (XB(2) must be > 0)'
if(calc->ISECS==1 && geom->IFX>0)
{
if(geom->IWF==1)
{
file_3<<"There cannot be a secondary stream when weighting factors are present"<<endl;
// WRITE (3,*) 'There cannot be a secondary stream when weighting factors are present'
return;//STOP//dout
}
file_3<<"There is a secondary stream"<<endl;
// WRITE (3,*) 'There is a secondary stream'
file_4>>geom->XS;
// READ (4,*) XS //geom->XS
file_3<<"The secondary stream boundary is at r= "<<geom->XS<<endl;
// WRITE (3,*) 'The secondary stream boundary is at r=',XS //geom->XS
}
if(calc->ISECS==1)
{
file_3<<"The secondary stream (at x>0 or X>XS) properties are:-"<<endl;
file_4>>gas->FND[2];
file_3<<"The stream number density is "<<gas->FND[2]<<endl;
file_4>>gas->FTMP[2];
file_3<<"The stream temperature is "<<gas->FTMP[2]<<endl;
// WRITE (3,*) 'The secondary stream (at x>0 or X>XS) properties are:-'
// READ (4,*) FND(2) //gas->FND
// WRITE (3,*) ' The stream number density is',FND(2) //gas->FND
// READ (4,*) FTMP(2) //gas->FTMP
// WRITE (3,*) ' The stream temperature is',FTMP(2) //gas->FTMP
if(gas->MMVM>0)
{
file_4>>gas->FVTMP[2];
file_3<<"The stream vibrational and electronic temperature is "<<gas->FVTMP[2]<<endl;
// READ (4,*) FVTMP(2) //gas->FVTMP[2]
// WRITE (3,*) ' The stream vibrational and electronic temperature is',FVTMP(2) //gas->FVTMP[2]
}
file_4>>gas->VFX[2];
file_3<<"The stream velocity in the x direction is "<<gas->VFX[2]<<endl;
file_4>>gas->VFY[2];
file_3<<"The stream velocity in the y direction is "<<gas->VFY[2]<<endl;
// READ (4,*) VFX(2) //gas->VFX
// WRITE (3,*) ' The stream velocity in the x direction is',VFX(2) //gas->VFX
// READ (4,*) VFY(2) //gas->VFY
// WRITE (3,*) ' The stream velocity in the y direction is',VFY(2) //gas->VFY
if(gas->MSP>1)
{
for(N=1;N<=gas->MSP;N++)
{
file_4>>gas->FSP[N][2];
file_3<<"The fraction of species "<<N<<" is "<<gas->FSP[N][2]<<endl;
// READ (4,*) FSP(N,2) //gas->FSP
// WRITE (3,*) ' The fraction of species',N,' is',FSP(N,2) //gas->FSP
}
}
else
{
gas->FSP[1][2]=1;
}
}
if(geom->IFX==0 && geom->ITYPE[1]==0)
{
file_4>>calc->IREM;
// READ (4,*) IREM //calc->IREM
if(calc->IREM==0)
{
file_3<<"There is no molecule removal"<<endl;
// WRITE (3,*) 'There is no molecule removal'
geom->XREM=geom->XB[1]-1.e00;
geom->FREM=0.e00;
}
else if(calc->IREM==1)
{
file_4>>geom->XREM;
file_3<<"There is full removal of the entering (at XB(1)) molecules between "<<geom->XREM<<" and "<<geom->XB[2]<<endl;
// READ (4,*) XREM //geom->XREM
// WRITE (3,*) ' There is full removal of the entering (at XB(1)) molecules between',XREM,' and',XB(2) //geom->XREM ,geom->XB[2]
geom->FREM=1.e00;
}
else if(calc->IREM==2)
{
file_3<<"Molecule removal is specified whenever the program is restarted"<<endl;
// WRITE (3,*) ' Molecule removal is specified whenever the program is restarted'
geom->XREM=geom->XB[1]-1.e00;
geom->FREM=0.e00;
}
else
{
geom->XREM=geom->XB[1]-1.e00;
geom->FREM=0.e00;
}
}
geom->IVB=0;
geom->VELOB=0.e00;
if(geom->ITYPE[2]==1)
{
file_4>>geom->IVB;
// READ (4,*) IVB
if(geom->IVB==0)
file_3<<"The outer boundary is stationary"<<endl;
// WRITE (3,*) ' The outer boundary is stationary'
if(geom->IVB==1)
{
file_3<<"The outer boundary moves with a constant speed"<<endl;
file_4>>geom->VELOB;
file_3<<" The speed of the outer boundary is "<<geom->VELOB<<endl;
// WRITE (3,*) ' The outer boundary moves with a constant speed'
// READ (4,*) VELOB //geom->VELOB
// WRITE (3,*) ' The speed of the outer boundary is',VELOB //geom->VELOB
}
}
file_4>>calc->MOLSC;
file_3<<"The desired number of molecules in a sampling cell is "<<calc->MOLSC<<endl;
// READ (4,*) MOLSC //calc->MOLSC
// WRITE (3,*) 'The desired number of molecules in a sampling cell is',MOLSC ////calc->MOLSC
}
//set the speed of the outer boundary
file_3.close();
file_4.close();
// CLOSE (3)
// CLOSE (4)
// set the stream at the maximum x boundary if there is no secondary stream
if(calc->ISECS==0 && geom->ITYPE[2]==0)
{
gas->FND[2]=gas->FND[1];
gas->FTMP[2]=gas->FTMP[1];
if(gas->MMVM>0)
gas->FVTMP[2]=gas->FVTMP[1];
gas->VFX[2]=gas->VFX[1];
if(gas->MSP>1)
{
for(N=1;N<=gas->MSP;N++)
{
gas->FSP[N][2]=gas->FSP[N][1];
}
}
else
gas->FSP[1][2]=1;
}
//dout
//1234 CONTINUE;
return;
}
void INITIALISE_SAMPLES()
{
//start a new sample for all classes of flow
//CALC calc;
//GEOM_1D geom;
//GAS gas;
//OUTPUT output;
//MOLECS molecs;
int N;
//
output->NSAMP=0.0;
output->TISAMP=calc->FTIME;
output->NMISAMP=molecs->NM;
//memset(output->COLLS,0.e00,sizeof(*output->COLLS));memset(output->WCOLLS,0.e00,sizeof(*output->WCOLLS));memset(output->CLSEP,0.e00,sizeof(*output->CLSEP));
for(int i=0;i<geom->NCELLS+1;i++)
output->COLLS[i]=0.e00;
for(int i=0;i<geom->NCELLS+1;i++)
output->WCOLLS[i]=0.e00;
for(int i=0;i<geom->NCELLS+1;i++)
output->CLSEP[i]=0.e00;
//output->COLLS=0.e00 ; output->WCOLLS=0.e00 ; output->CLSEP=0.e00;
//memset(calc->TCOL,0.0,sizeof(**calc->TCOL));//calc->TCOL=0.0;
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
calc->TCOL[i][j]=0.0;
}
}
//gas->TREACG=0;
//gas->TREACL=0;
for(int i=0;i<5;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->TREACG[i][j]=0;
}
}
for(int i=0;i<5;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->TREACL[i][j]=0;
}
}
//memset(output->CS,0.0,sizeof(***output->CS));memset(output->CSS,0.0,sizeof(****output->CSS));memset(output->CSSS,0.0,sizeof(**output->CSSS));
for(int j=0;j<gas->MSP+10;j++){
for(int k=0;k<geom->NCELLS+1;k++){
for(int l=0;l<gas->MSP+1;l++)
output->CS[j][k][l]=0.0;
}
}
for(int i=0;i<9;i++){
for(int j=0;j<3;j++){
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<3;l++)
output->CSS[i][j][k][l]=0.0;
}
}
}
for(int k=0;k<7;k++){
for(int l=0;l<3;l++)
output->CSSS[k][l]=0.0;
}
//output->CS=0.0 ; output->CSS=0.0 ; output->CSSS=0.0;
//memset(output->VIBFRAC,0.e00,sizeof(***output->VIBFRAC));//output->VIBFRAC=0.e00;
//memset(output->SUMVIB,0.e00,sizeof(**output->SUMVIB));//output->SUMVIB=0.e00;
for(int j=0;j<gas->MSP+1;j++){
for(int k=0;k<gas->MMVM+1;k++){
for(int l=0;l<151;l++)
output->VIBFRAC[j][k][l]=0.0;
}
}
for(int k=0;k<gas->MSP+1;k++){
for(int l=0;l<gas->MMVM+1;l++)
output->SUMVIB[k][l]=0.0;
}
}
////
//
void SET_INITIAL_STATE_1D()
{
//set the initial state of a homogeneous or one-dimensional flow
//
//MOLECS molecs;
//GEOM_1D geom;
//GAS gas;
//CALC calc;
//OUTPUT output;
//
//
int J,L,K,KK,KN,II,III,INC,NSET,NSC;
long long N,M;
double A,B,AA,BB,BBB,SN,XMIN,XMAX,WFMIN,DENG,ELTI,EA,XPREV;
double DMOM[4];
double VB[4][3];
double ROTE[3];
//
//NSET the alternative set numbers in the setting of exact initial state
//DMOM(N) N=1,2,3 for x,y and z momentum sums of initial molecules
//DENG the energy sum of the initial molecules
//VB alternative sets of velocity components
//ROTE alternative sets of rotational energy
//EA entry area
//INC counting increment
//ELTI initial electronic temperature
//XPREV the previous x coordinate
//
//memset(DMOM,0.e00,sizeof(DMOM));
for(int i=0;i<4;i++)
DMOM[i]=0.e00;
DENG=0.e00;
//set the number of molecules, divisions etc. based on stream 1
//
calc->NMI=10000*calc->IMEG+2; //small changes in number for statistically independent runs
geom->NDIV=calc->NMI/calc->MOLSC; //MOLSC molecules per division
//WRITE (9,*) 'The number of divisions is',NDIV
file_9<< "The number of divisions is "<<geom->NDIV<<endl;
//
geom->MDIV=geom->NDIV;
geom->ILEVEL=0;
//
geom->i_allocate(geom->ILEVEL+1,geom->MDIV+1,geom->JDIV);
// ALLOCATE (JDIV(0:ILEVEL,MDIV),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR JDIV ARRAY',ERROR
// ENDIF
//
geom->DDIV=(geom->XB[2]-geom->XB[1])/double(geom->NDIV);
geom->NCELLS=geom->NDIV;
//WRITE (9,*) 'The number of sampling cells is',NCELLS
file_9<<"The number of sampling cells is "<< geom->NCELLS<<endl;
geom->NCIS=calc->MOLSC/calc->NMCC;
geom->NCCELLS=geom->NCIS*geom->NDIV;
//WRITE (9,*) 'The number of collision cells is',NCCELLS
file_9<< "The number of collision cells is "<<geom->NCCELLS<<endl;
//
if(geom->IFX == 0) geom->XS=0.e00;
//
if(calc->ISECS == 0){
if(geom->IFX == 0) calc->FNUM=((geom->XB[2]-geom->XB[1])*gas->FND[1])/double(calc->NMI);
if(geom->IFX == 1) calc->FNUM=PI*(pow(geom->XB[2],2)-pow(geom->XB[1],2))*gas->FND[1]/double(calc->NMI);
if(geom->IFX == 2) calc->FNUM=1.3333333333333333333333e00*PI*(pow(geom->XB[2],3)-pow(geom->XB[1],3))*gas->FND[1]/double(calc->NMI);
}
else{
if(geom->IFX == 0) calc->FNUM=((geom->XS-geom->XB[1])*gas->FND[1]+(geom->XB[2]-geom->XS)*gas->FND[2])/double(calc->NMI);
if(geom->IFX == 1) calc->FNUM=PI*((pow(geom->XS,2)-pow(geom->XB[1],2))*gas->FND[1]+(pow(geom->XB[2],2)-pow(geom->XS,2))*gas->FND[2])/double(calc->NMI);
if(geom->IFX == 2) calc->FNUM=1.3333333333333333333333e00*PI*((pow(geom->XS,3)-pow(geom->XB[1],3))*gas->FND[1]+(pow(geom->XB[2],3)-pow(geom->XS,3))*gas->FND[2])/double(calc->NMI);
}
//
calc->FNUM=calc->FNUM*calc->FNUMF;
if(calc->FNUM < 1.e00) calc->FNUM=1.e00;
//
calc->FTIME=0.e00;
//
calc->TOTMOV=0.e00;
calc->TOTCOL=0.e00;
output->NDISSOC=0;
//memset(calc->TCOL,0.e00,sizeof(**calc->TCOL));//calc->TCOL=0.e00;
for(int i=0;i<gas->MSP+1;i++){
for(int j=0;j<gas->MSP+1;j++){
calc->TCOL[i][j]=0.e00;
}
}
//memset(calc->TDISS,0.e00,sizeof(*calc->TDISS));//calc->TDISS=0.e00;
//memset(calc->TRECOMB,0.e00,sizeof(*calc->TRECOMB));//calc->TRECOMB=0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->TDISS[i]=0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->TRECOMB[i]=0.e00;
//gas->TREACG=0;
//gas->TREACL=0;
for(int i=0;i<5;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->TREACG[i][j]=0;
}
}
for(int i=0;i<5;i++){
for(int j=0;j<gas->MSP+1;j++){
gas->TREACL[i][j]=0;
}
}
//memset(gas->TNEX,0.e00,sizeof(*gas->TNEX));//gas->TNEX=0.e00;
for(int i=0;i<gas->MEX+1;i++)
gas->TNEX[i]= 0.e00;
for(N=1;N<=geom->NDIV;N++){
geom->JDIV[0][N]=-N;
}
//
geom->d_allocate(5,geom->NCELLS+1,geom->CELL);
geom->i_allocate(geom->NCELLS+1,geom->ICELL);
geom->d_allocate(6,geom->NCCELLS+1,geom->CCELL);
geom->i_allocate(4,geom->NCCELLS+1,geom->ICCELL);
// ALLOCATE (CELL(4,NCELLS),ICELL(NCELLS),CCELL(5,NCCELLS),ICCELL(3,NCCELLS),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR CELL ARRAYS',ERROR
// ENDIF
//
output->d_allocate(geom->NCELLS+1,output->COLLS);
output->d_allocate(geom->NCELLS+1,output->WCOLLS);
output->d_allocate(geom->NCELLS+1,output->CLSEP);
output->d_allocate(gas->MNSR+1,output->SREAC);
output->d_allocate(24,geom->NCELLS+1,output->VAR);
output->d_allocate(13,geom->NCELLS+1,gas->MSP+1,output->VARSP);
output->d_allocate(36+gas->MSP,3,output->VARS);
output->d_allocate(10+gas->MSP,geom->NCELLS+1,gas->MSP+1,output->CS);
output->d_allocate(9,3,gas->MSP+1,3,output->CSS);
output->d_allocate(7,3,output->CSSS);
// ALLOCATE (COLLS(NCELLS),WCOLLS(NCELLS),CLSEP(NCELLS),SREAC(MNSR),VAR(23,NCELLS),VARSP(0:12,NCELLS,MSP), &
// VARS(0:35+MSP,2),CS(0:9+MSP,NCELLS,MSP),CSS(0:8,2,MSP,2),CSSS(6,2),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR SAMPLING ARRAYS',ERROR
// ENDIF
//
if(gas->MMVM >= 0){
output->d_allocate(gas->MSP+1,gas->MMVM+1,151,output->VIBFRAC);
output->d_allocate(gas->MSP+1,gas->MMVM+1,output->SUMVIB);
// ALLOCATE (VIBFRAC(MSP,MMVM,0:150),SUMVIB(MSP,MMVM),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR RECOMBINATION ARRAYS',ERROR
// ENDIF
}
//
INITIALISE_SAMPLES();
//
//Set the initial cells
for(N=1;N<=geom->NCELLS;N++){
geom->CELL[2][N]=geom->XB[1]+double(N-1)*geom->DDIV;
geom->CELL[3][N]=geom->CELL[2][N]+geom->DDIV;
geom->CELL[1][N]=geom->CELL[2][N]+0.5e00*geom->DDIV;
if(geom->IFX == 0) geom->CELL[4][N]=geom->CELL[3][N]-geom->CELL[2][N]; //calculation assumes unit cross-section
if(geom->IFX == 1) geom->CELL[4][N]=PI*(pow(geom->CELL[3][N],2)-pow(geom->CELL[2][N],2)); //assumes unit length of full cylinder
if(geom->IFX == 2) geom->CELL[4][N]=1.33333333333333333333e00*PI*(pow(geom->CELL[3][N],3)-pow(geom->CELL[2][N],3)); //flow is in the full sphere
geom->ICELL[N]=geom->NCIS*(N-1);
for(M=1;M<=geom->NCIS;M++){
L=geom->ICELL[N]+M;
XMIN=geom->CELL[2][N]+double(M-1)*geom->DDIV/double(geom->NCIS);
XMAX=XMIN+geom->DDIV/double(geom->NCIS);
if(geom->IFX == 0) geom->CCELL[1][L]=XMAX-XMIN;
if(geom->IFX == 1) geom->CCELL[1][L]=PI*(pow(XMAX,2)-pow(XMIN,2)); //assumes unit length of full cylinder
if(geom->IFX == 2) geom->CCELL[1][L]=1.33333333333333333333e00*PI*(pow(XMAX,3)-pow(XMIN,3)); //flow is in the full sphere
geom->CCELL[2][L]=0.e00;
geom->ICCELL[3][L]=N;
}
output->VAR[11][N]=gas->FTMP[1];
output->VAR[8][N]=gas->FTMP[1];
}
//
if(geom->IWF == 0) geom->AWF=1.e00;
if(geom->IWF == 1){
//FNUM must be reduced to allow for the weighting factors
A=0.e00;
B=0.e00;
for(N=1;N<=geom->NCELLS;N++){
A=A+geom->CELL[4][N];
B=B+geom->CELL[4][N]/(1.0+geom->WFM*pow(geom->CELL[1][N],geom->IFX));
}
geom->AWF=A/B;
calc->FNUM=calc->FNUM*B/A;
}
//
//WRITE (9,*) 'FNUM is',FNUM
file_9<<"FNUM is "<<calc->FNUM<<endl;
//
//set the information on the molecular species
//
A=0.e00;
B=0.e00;
for(L=1;L<=gas->MSP;L++){
A=A+gas->SP[5][L]*gas->FSP[L][1];
B=B+(3.0+gas->ISPR[1][L])*gas->FSP[L][1];
gas->VMP[L][1]=sqrt(2.e00*BOLTZ*gas->FTMP[1]/gas->SP[5][L]);
if((geom->ITYPE[2]== 0) || (calc->ISECS == 1)) gas->VMP[L][2]=sqrt(2.e00*BOLTZ*gas->FTMP[2]/gas->SP[5][L]);
calc->VNMAX[L]=3.0*gas->VMP[L][1];
if(L == 1)
gas->VMPM=gas->VMP[L][1];
else
if(gas->VMP[L][1] > gas->VMPM) gas->VMPM=gas->VMP[L][1];
}
//WRITE (9,*) 'VMPM =',VMPM
file_9<< "VMPM = "<<gas->VMPM<<endl;
gas->FDEN=A*gas->FND[1];
gas->FPR=gas->FND[1]*BOLTZ*gas->FTMP[1];
gas->FMA=gas->VFX[1]/sqrt((B/(B+2.e00))*BOLTZ*gas->FTMP[1]/A);
//set the molecular properties for collisions between unlike molecules
//to the average of the molecules
for(L=1;L<=gas->MSP;L++){
for(M=1;M<=gas->MSP;M++){
gas->SPM[4][L][M]=0.5e00*(gas->SP[1][L]+gas->SP[1][M]);
gas->SPM[3][L][M]=0.5e00*(gas->SP[3][L]+gas->SP[3][M]);
gas->SPM[5][L][M]=0.5e00*(gas->SP[2][L]+gas->SP[2][M]);
gas->SPM[1][L][M]=gas->SP[5][L]*(gas->SP[5][M]/(gas->SP[5][L]+gas->SP[5][M]));
gas->SPM[2][L][M]=0.25e00*PI*pow((gas->SP[1][L]+gas->SP[1][M]),2);
AA=2.5e00-gas->SPM[3][L][M];
A=tgamma(AA);
gas->SPM[6][L][M]=1.e00/A;
gas->SPM[8][L][M]=0.5e00*(gas->SP[4][L]+gas->SP[4][M]);
if((gas->ISPR[1][L] > 0) && (gas->ISPR[1][M] > 0))
gas->SPM[7][L][M]=(gas->SPR[1][L]+gas->SPR[1][M])*0.5e00;
if((gas->ISPR[1][L] > 0) && (gas->ISPR[1][M] == 0))
gas->SPM[7][L][M]=gas->SPR[1][L];
if((gas->ISPR[1][M] > 0) && (gas->ISPR[1][L] == 0))
gas->SPM[7][L][M]=gas->SPR[1][M];
}
}
if(gas->MSP == 1){ //set unscripted variables for the simple gas case
gas->RMAS=gas->SPM[1][1][1];
gas->CXSS=gas->SPM[2][1][1];
gas->RGFS=gas->SPM[6][1][1];
}
//
for(L=1;L<=gas->MSP;L++){
gas->CR[L]=0.e00;
for(M=1;M<=gas->MSP;M++){ //set the equilibrium collision rates
gas->CR[L]=gas->CR[L]+2.e00*SPI*pow(gas->SPM[4][L][M],2)*gas->FND[1]*gas->FSP[M][1]*pow((gas->FTMP[1]/gas->SPM[5][L][M]),(1.0-gas->SPM[3][L][M]))*sqrt(2.0*BOLTZ*gas->SPM[5][L][M]/gas->SPM[1][L][M]);
}
}
A=0.e00;
for(L=1;L<=gas->MSP;L++)
A=A+gas->FSP[L][1]*gas->CR[L];
gas->CTM=1.e00/A;
//WRITE (9,*) 'Collision time in the stream is',CTM
file_9<< "Collision time in the stream is "<<gas->CTM;
//
for(L=1;L<=gas->MSP;L++){
gas->FP[L]=0.e00;
for(M=1;M<=gas->MSP;M++){
gas->FP[L]=gas->FP[L]+PI*pow(gas->SPM[4][L][M],2)*gas->FND[1]*gas->FSP[M][1]*pow((gas->FTMP[1]/gas->SPM[5][L][M]),(1.0-gas->SPM[3][L][M]))*sqrt(1.e00+gas->SP[5][L]/gas->SP[5][M]);
}
gas->FP[L]=1.e00/gas->FP[L];
}
gas->FPM=0.e00;
for(L=1;L<=gas->MSP;L++)
gas->FPM=gas->FPM+gas->FSP[L][1]*gas->FP[L];
//WRITE (9,*) 'Mean free path in the stream is',FPM
file_9<<"Mean free path in the stream is "<<gas->FPM<<endl;
//
calc->TNORM=gas->CTM;
if(calc->ICLASS == 1) calc->TNORM= (geom->XB[2]-geom->XB[1])/gas->VMPM; //there may be alternative definitions
//
//set the initial time step
calc->DTM=gas->CTM*calc->CPDTM;
//
if(fabs(gas->VFX[1]) > 1.e-6)
A=(0.5e00*geom->DDIV/gas->VFX[1])*calc->TPDTM;
else
A=0.5e00*geom->DDIV/gas->VMPM;
if(geom->IVB == 1){
B=0.25e00*geom->DDIV/(fabs(geom->VELOB)+gas->VMPM);
if(B < A) A=B;
}
if(calc->DTM > A) calc->DTM=A;
//
calc->DTM=0.1e00*calc->DTM; //OPTIONAL MANUAL ADJUSTMENT that is generally used with a fixed time step (e.g for making x-t diagram)
//
calc->DTSAMP=calc->SAMPRAT*calc->DTM;
calc->DTOUT=calc->OUTRAT*calc->DTSAMP;
calc->TSAMP=calc->DTSAMP;
calc->TOUT=calc->DTOUT;
calc->ENTMASS=0.0;
//
//WRITE (9,*) 'The initial value of the overall time step is',DTM
file_9<< "The initial value of the overall time step is "<<calc->DTM<<endl;
//
//initialise cell quantities associated with collisions
//
for(N=1;N<=geom->NCCELLS;N++){
geom->CCELL[3][N]=calc->DTM/2.e00;
geom->CCELL[4][N]=2.e00*gas->VMPM*gas->SPM[2][1][1];
calc->RANF=((double)rand()/(double)RAND_MAX);
// RANDOM_NUMBER(RANF)
geom->CCELL[2][N]=calc->RANF;
geom->CCELL[5][N]=0.e00;
}
//
//set the entry quantities
//
for(K=1;K<=2;K++){
if((geom->ITYPE[K] == 0) || ((K == 2) && (geom->ITYPE[K] == 4))){
if(geom->IFX == 0) EA=1.e00;
if(geom->IFX == 1) EA=2.e00*PI*geom->XB[K];
if(geom->IFX == 2) EA=4.e00*PI*pow(geom->XB[K],2);
for(L=1;L<=gas->MSP;L++){
if(K == 1) SN=gas->VFX[1]/gas->VMP[L][1];
if(K == 2) SN=-gas->VFX[2]/gas->VMP[L][2];
AA=SN;
A=1.e00+erf(AA);
BB=exp(-pow(SN,2));
gas->ENTR[3][L][K]=SN;
gas->ENTR[4][L][K]=SN+sqrt(pow(SN,2)+2.e00);
gas->ENTR[5][L][K]=0.5e00*(1.e00+SN*(2.e00*SN-gas->ENTR[4][L][K]));
gas->ENTR[6][L][K]=3.e00*gas->VMP[L][K];
B=BB+SPI*SN*A;
gas->ENTR[1][L][K]=EA*gas->FND[K]*gas->FSP[L][K]*gas->VMP[L][K]*B/(calc->FNUM*2.e00*SPI);
gas->ENTR[2][L][K]=0.e00;
}
}
}
//
//Set the uniform stream
//
molecs->MNM=1.1e00*calc->NMI;
//
if(gas->MMVM > 0){
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->d_allocate(molecs->MNM+1,molecs->PROT);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->i_allocate(gas->MMVM+1,molecs->MNM+1,molecs->IPVIB);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),PROT(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM), &
// IPVIB(MMVM,MNM),PELE(MNM),STAT=ERROR)
}
else{
if(gas->MMRM > 0){
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->d_allocate(molecs->MNM+1,molecs->PROT);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),PROT(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM),PELE(MNM),STAT=ERROR)
}
else{
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM),PELE(MNM),STAT=ERROR)
}
}
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR MOLECULE ARRAYS',ERROR
// ENDIF
//
molecs->NM=0;
if(calc->IGS == 1){
cout<<"Setting the initial gas"<<endl;
for(L=1;L<=gas->MSP;L++){
//memset(ROTE,0.0,sizeof(ROTE));
for(int i=0;i<3;i++)
ROTE[i]=0.0;
for(K=1;K<=calc->ISECS+1;K++){
if(calc->ISECS == 0){ //no secondary stream
M=(double(calc->NMI)*gas->FSP[L][1]*geom->AWF);
XMIN=geom->XB[1];
XMAX=geom->XB[2];
}
else{
A=(pow(geom->XS,geom->JFX)-pow(geom->XB[1],geom->JFX))*gas->FND[1]+(pow(geom->XB[2],geom->JFX)-pow(geom->XS,geom->JFX))*gas->FND[2];
if(K == 1){
M=int(double(calc->NMI)*((pow(geom->XS,geom->JFX)-pow(geom->XB[1],geom->JFX))*gas->FND[1]/A)*gas->FSP[L][1]);
XMIN=geom->XB[1];
XMAX=geom->XS;
}
else{
M=int(double(calc->NMI)*((pow(geom->XB[2],geom->JFX)-pow(geom->XS,geom->JFX))*gas->FND[2]/A)*gas->FSP[L][2]);
XMIN=geom->XS;
XMAX=geom->XB[2];
}
}
if((K == 1) || (calc->ISECS == 1)){
III=0;
WFMIN=1.e00+geom->WFM*pow(geom->XB[1],geom->IFX);
N=1;
INC=1;
if((K== 2) && (geom->JFX > 1)){
BBB=(pow(XMAX,geom->JFX)-pow(XMIN,geom->JFX))/double(M);
XPREV=XMIN;
}
while(N < M){
if((geom->JFX == 1) || (K == 1))
A=pow((pow(XMIN,geom->JFX)+((double(N)-0.5e00)/double(M))*pow((XMAX-XMIN),geom->JFX)),(1.e00/double(geom->JFX)));
else{
A=pow((pow(XPREV,geom->JFX)+BBB),(1.e00/double(geom->JFX)));
XPREV=A;
}
if(geom->IWF == 0)
B=1.e00;
else{
B=WFMIN/(1.e00+geom->WFM*pow(A,geom->IFX));
if((B < 0.1e00) && (INC == 1)) INC=10;
if((B < 0.01e00) && (INC == 10)) INC=100;
if((B < 0.001e00) && (INC == 100)) INC=1000;
if((B < 0.0001e00) && (INC == 1000)) INC=10000;
}
calc->RANF=((double)rand()/(double)RAND_MAX);
// CALL RANDOM_NUMBER(RANF)
if(B*double(INC) > calc->RANF){
molecs->NM=molecs->NM+1;
molecs->PX[1][molecs->NM]=A;
molecs->IPSP[molecs->NM]=L;
molecs->PTIM[molecs->NM]=0.0;
if(geom->IVB == 0) FIND_CELL_1D(molecs->PX[1][molecs->NM],molecs->IPCELL[molecs->NM],KK);
if(geom->IVB == 1) FIND_CELL_MB_1D(molecs->PX[1][molecs->NM],molecs->IPCELL[molecs->NM],KK,molecs->PTIM[molecs->NM]);
//
for(NSET=1;NSET<=2;NSET++){
for(KK=1;KK<=3;KK++){
RVELC(A,B,gas->VMP[L][K]);
if(A < B){
if(DMOM[KK] < 0.e00)
BB=B;
else
BB=A;
}
else{
if(DMOM[KK] < 0.e00)
BB=A;
else
BB=B;
}
VB[KK][NSET]=BB;
}
if(gas->ISPR[1][L] > 0) SROT(L,gas->FTMP[K],ROTE[NSET]);
}
A=(0.5e00*gas->SP[5][L]*(pow(VB[1][1],2)+pow(VB[2][1],2)+pow(VB[3][1],2))+ROTE[1])/(0.5e00*BOLTZ*gas->FTMP[K])-3.e00-double(gas->ISPR[1][L]);
B=(0.5e00*gas->SP[5][L]*(pow(VB[1][2],2)+pow(VB[2][2],2)+pow(VB[3][2],2))+ROTE[2])/(0.5e00*BOLTZ*gas->FTMP[K])-3.e00-double(gas->ISPR[1][L]);
if(A < B){
if(DENG < 0.e00)
KN=2;
else
KN=1;
}
else{
if(DENG < 0.e00)
KN=1;
else
KN=2;
}
for(KK=1;KK<=3;KK++){
molecs->PV[KK][molecs->NM]=VB[KK][KN];
DMOM[KK]=DMOM[KK]+VB[KK][KN];
}
molecs->PV[1][molecs->NM]=molecs->PV[1][molecs->NM]+gas->VFX[K];
molecs->PV[2][molecs->NM]=molecs->PV[2][molecs->NM]+gas->VFY[K];
if(gas->ISPR[1][L] > 0) molecs->PROT[molecs->NM]=ROTE[KN];
// PROT(NM)=0.d00 //uncomment for zero initial rotational temperature (Figs. 6.1 and 6.2)
if(KN == 1) DENG=DENG+A;
if(KN == 2) DENG=DENG+B;
if(gas->MMVM > 0){
if(gas->ISPV[L] > 0){
for(J=1;J<=gas->ISPV[L];J++)
SVIB(L,gas->FVTMP[K],molecs->IPVIB[J][molecs->NM],J);
}
ELTI=gas->FVTMP[K];
if(gas->MELE > 1) SELE(L,ELTI,molecs->PELE[molecs->NM]);
}
}
N=N+INC;
}
}
}
}
//
//WRITE (9,*) 'DMOM',DMOM
//WRITE (9,*) 'DENG',DENG
file_9<<"DMOM "<<DMOM<<endl;
file_9<<"DENG "<<DENG<<endl;
}
//
calc->NMI=molecs->NM;
//
//SPECIAL CODING FOR INITIATION OF COMBUSTION IN H2-O2 MIXTURE (FORCED IGNITION CASES in section 6.7)
//set the vibrational levels of A% random molecules to 5
// A=0.05D00
// M=0.01D00*A*NM
// DO N=1,M
// CALL RANDOM_NUMBER(RANF)
// K=INT(RANF*DFLOAT(NM))+1
// IPVIB(1,K)=5
// END DO
//
SAMPLE_FLOW();
OUTPUT_RESULTS();
calc->TOUT=calc->TOUT-calc->DTOUT;
return;
}
void MOLECULES_ENTER_1D()
{
//molecules enter boundary at XB(1) and XB(2) and may be removed behind a wave
//MOLECS molecs;
//GAS gas;
//CALC calc;
//GEOM_1D geom;
//OUTPUT output;
//
int K,L,M,N,NENT,II,J,JJ,KK,NTRY;
double A,B,AA,BB,U,VN,XI,X,DX,DY,DZ;
//
//NENT number to enter in the time step
//
calc->ENTMASS=0.e00;
//
for(J=1;J<=2;J++){ //J is the end
if((geom->ITYPE[J] == 0) || (geom->ITYPE[J] == 4)){
KK=1;//the entry surface will normally use the reference gas (main stream) properties
if((J == 2) && (calc->ISECS == 1) && (geom->XB[2] > 0.e00)) KK=2; //KK is 1 for reference gas 2 for the secondary stream
for(L=1;L<=gas->MSP;L++){
A=gas->ENTR[1][L][J]*calc->DTM+gas->ENTR[2][L][J];
if((geom->ITYPE[2] == 4) && (calc->ICN == 1)){
NENT=A;
if(J == 1) calc->EME[L]=NENT;
if(J == 2) {
A=calc->ALOSS[L]-calc->EME[L]-calc->AJM[L];
calc->AJM[L]=0.e00;
if(A < 0.e00){
calc->AJM[L]=-A;
A=0.e00;
}
}
}
NENT=A;
gas->ENTR[2][L][J]=A-NENT;
if((geom->ITYPE[2] == 4) && (J == 2) && (calc->ICN == 1)) gas->ENTR[2][L][J]=0.e00;
if(NENT > 0){
for(M=1;M<=NENT;M++){
if(molecs->NM >= molecs->MNM){
cout<< "EXTEND_MNM from MOLECULES_ENTER "<<endl;
EXTEND_MNM(1.1);
}
molecs->NM=molecs->NM+1;
AA=max(0.e00,gas->ENTR[3][L][J]-3.e00);
BB=max(3.e00,gas->ENTR[3][L][J]+3.e00);
II=0;
while(II == 0){
calc->RANF=((double)rand()/(double)RAND_MAX);
// CALL RANDOM_NUMBER(RANF)
B=AA+(BB-AA)*calc->RANF;
U=B-gas->ENTR[3][L][J];
A=(2.e00*B/gas->ENTR[4][L][J])*exp(gas->ENTR[5][L][J]-U*U);
calc->RANF=((double)rand()/(double)RAND_MAX);
// CALL RANDOM_NUMBER(RANF)
if(A > calc->RANF) II=1;
}
molecs->PV[1][molecs->NM]=B*gas->VMP[L][KK];
if(J == 2) molecs->PV[1][molecs->NM]=-molecs->PV[1][molecs->NM];
//
RVELC(molecs->PV[2][molecs->NM],molecs->PV[3][molecs->NM],gas->VMP[L][KK]);
molecs->PV[2][molecs->NM]=molecs->PV[2][molecs->NM]+gas->VFY[J];
//
if(gas->ISPR[1][L] > 0) SROT(L,gas->FTMP[KK],molecs->PROT[molecs->NM]);
//
if(gas->MMVM > 0){
for(K=1;K<=gas->ISPV[L];K++)
SVIB(L,gas->FVTMP[KK],molecs->IPVIB[K][molecs->NM],K);
}
if(gas->MELE > 1) SELE(L,gas->FTMP[KK],molecs->PELE[molecs->NM]);
//
//DEBUG point: the Fortran original has 'IF (PELE(NM) > 0.) CONTINUE', where CONTINUE is a no-op;
//a C++ 'continue' here would wrongly skip the rest of the molecule set-up, so it is omitted
//
molecs->IPSP[molecs->NM]=L;
//advance the molecule into the flow
calc->RANF=((double)rand()/(double)RAND_MAX);
// CALL RANDOM_NUMBER(RANF)
XI=geom->XB[J];
DX=calc->DTM*calc->RANF*molecs->PV[1][molecs->NM];
if((geom->IFX == 0) || (J == 2)) X=XI+DX;
if(J == 1){ //1-D move at outer boundary so molecule remains in flow
if(geom->IFX > 0) DY=calc->DTM*calc->RANF*molecs->PV[2][molecs->NM];
DZ=0.e00;
if(geom->IFX == 2) DZ=calc->DTM*calc->RANF*molecs->PV[3][molecs->NM];
if(geom->IFX > 0) AIFX(XI,DX,DY,DZ,X,molecs->PV[1][molecs->NM],molecs->PV[2][molecs->NM],molecs->PV[3][molecs->NM]);
}
molecs->PX[calc->NCLASS][molecs->NM]=X;
molecs->PTIM[molecs->NM]=calc->FTIME;
if(geom->IVB == 0) FIND_CELL_1D(molecs->PX[calc->NCLASS][molecs->NM],molecs->IPCELL[molecs->NM],JJ);
if(geom->IVB == 1) FIND_CELL_MB_1D(molecs->PX[calc->NCLASS][molecs->NM],molecs->IPCELL[molecs->NM],JJ,molecs->PTIM[molecs->NM]);
molecs->IPCP[molecs->NM]=0;
if(geom->XREM > geom->XB[1]) calc->ENTMASS=calc->ENTMASS+gas->SP[5][L];
}
}
}
if((geom->ITYPE[2] == 4) && (J==2) && (molecs->NM != calc->NMP) && (calc->ICN == 1))
continue;
}
}
//
//stagnation streamline molecule removal
if(geom->XREM > geom->XB[1]){
calc->ENTMASS=geom->FREM*calc->ENTMASS;
NTRY=0;
calc->ENTMASS=calc->ENTMASS+calc->ENTREM;
while((calc->ENTMASS > 0.e00) && (NTRY < 10000)){
NTRY=NTRY+1;
if(NTRY == 10000){
cout<<"Unable to find molecule for removal"<<endl;
calc->ENTMASS=0.e00;
//memset(calc->VNMAX,0.e00,sizeof(*calc->VNMAX));//calc->VNMAX=0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->VNMAX[i]=0.e00;
}
calc->RANF=((double)rand()/(double)RAND_MAX);
// CALL RANDOM_NUMBER(RANF)
N=molecs->NM*calc->RANF+0.9999999e00;
if(molecs->PX[calc->NCLASS][N] > geom->XREM){
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
//IF (RANF < ((PX(N)-XREM)/(XB(2)-XREM))**2) THEN
if(fabs(gas->VFY[1]) < 1.e-3)
VN=sqrt(molecs->PV[2][N]*molecs->PV[2][N]+molecs->PV[3][N]*molecs->PV[3][N]); //AXIALLY SYMMETRIC STREAMLINE
else
VN=fabs(molecs->PV[3][N]); //TWO-DIMENSIONAL STREAMLINE
L=molecs->IPSP[N];
if(VN > calc->VNMAX[L]) calc->VNMAX[L]=VN;
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF < VN/calc->VNMAX[L]){
REMOVE_MOL(N);
calc->ENTMASS=calc->ENTMASS-gas->SP[5][L];
NTRY=0;
}
//END IF
}
}
calc->ENTREM=calc->ENTMASS;
}
}
void FIND_CELL_1D(double &X,int &NCC,int &NSC)
{
//find the collision and sampling cells at a given location in a 0D or 1D case
//MOLECS molecs;
//GEOM_1D geom;
//CALC calc;
int N,L,M,ND;
double FRAC,DSC;
//
//NCC collision cell number
//NSC sampling cell number
//X location
//ND division number
//DSC the ratio of the sub-division width to the division width
//
ND=(X-geom->XB[1])/geom->DDIV+0.99999999999999e00;
//
if(geom->JDIV[0][ND] < 0){ //the division is a level 0 (no sub-division) sampling cell
NSC=-geom->JDIV[0][ND];
// IF (IFX == 0)
NCC=geom->NCIS*(X-geom->CELL[2][NSC])/(geom->CELL[3][NSC]-geom->CELL[2][NSC])+0.9999999999999999e00;
NCC=NCC+geom->ICELL[NSC];
// IF (NCC == 0) NCC=1
return;
}
else{ //the molecule is in a subdivided division
FRAC=(X-geom->XB[1])/geom->DDIV-double(ND-1);
M=ND;
for(N=1;N<=geom->ILEVEL;N++){
DSC=1.e00/double(N+1);
for(L=1;L<=2;L++){ //over the two level 1 subdivisions
if(((L == 1) && (FRAC < DSC)) || ((L == 2) && (FRAC >= DSC))){ //the second clause needs &&, matching the Fortran .AND.
M=geom->JDIV[N-1][M]+L; //the address in JDIV
if(geom->JDIV[N][M] < 0){
NSC=-geom->JDIV[N][M];
NCC=geom->NCIS*(X-geom->CELL[2][NSC])/(geom->CELL[3][NSC]-geom->CELL[2][NSC])+0.999999999999999e00;
if(NCC == 0) NCC=1;
NCC=NCC+geom->ICELL[NSC];
return;
}
}
}
FRAC=FRAC-DSC;
}
}
file_9<<"No cell for molecule at x= "<<X<<endl;
return ;
}
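//
//Illustrative sketch (not part of the original translation): the level-0 division index used in
//FIND_CELL_1D is just a truncation of (X-XB(1))/DDIV shifted to give 1-based indices. The helper
//name DEMO_DIVISION_INDEX and its arguments are hypothetical; it assumes a uniform grid with
//origin XB1 and division width DDIV, mirroring the expression used above.
int DEMO_DIVISION_INDEX(double X, double XB1, double DDIV)
{
//e.g. XB1=0.0, DDIV=0.1, X=0.25 gives (2.5+0.99999...) truncated to 3, i.e. division 3
return int((X-XB1)/DDIV+0.99999999999999e00);
}
//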
void FIND_CELL_MB_1D(double &X,int &NCC,int &NSC,double &TIM)
{
//find the collision and sampling cells at a given location in a 0D or 1D case
//when there is a moving boundary
//MOLECS molecs;
//GEOM_1D geom;
//CALC calc;
//
// IMPLICIT NONE
//
int N,L,M,ND;
double FRAC,DSC,A,B,C;
//
//NCC collision cell number
//NSC sampling cell number
//X location
//ND division number
//DSC the ratio of the sub-division width to the division width
//TIM the time
//
A=(geom->XB[2]+geom->VELOB*TIM-geom->XB[1])/double(geom->NDIV); //new DDIV
ND=(X-geom->XB[1])/A+0.99999999999999e00;
B=geom->XB[1]+double(ND-1)*A;
//
//the division is a level 0 sampling cell
NSC=-geom->JDIV[0][ND];
NCC=geom->NCIS*(X-B)/A+0.99999999999999e00;
NCC=NCC+geom->ICELL[NSC];
//in the Fortran original the 'No cell for molecule at x=' warning follows the RETURN and is
//unreachable, so it is not executed here
return;
//
}
void RVELC(double &U,double &V,double &VMP)
{
//CALC calc;
//generates two random velocity components U and V in an equilibrium
//gas with most probable speed VMP
//based on equations (4.4) and (4.5)
double A,B;
//
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
A=sqrt(-log(calc->RANF));
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
B=DPI*calc->RANF;
U=A*sin(B)*VMP;
V=A*cos(B)*VMP;
return;
}
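//
//Minimal sanity-check sketch (illustrative only; the name DEMO_RVELC_CHECK is not in the original
//code). RVELC is a Box-Muller style sampler, so each velocity component of an equilibrium gas with
//most probable speed VMP is Gaussian with mean square VMP*VMP/2. This assumes the global calc
//object has already been allocated, since RVELC draws its random numbers through calc->RANF.
void DEMO_RVELC_CHECK(double VMP)
{
double U,V,SUMSQ=0.0;
int NSAMPLE=100000;
for(int N=1;N<=NSAMPLE;N++){
RVELC(U,V,VMP);
SUMSQ=SUMSQ+U*U+V*V;
}
//the mean square of a component should approach VMP*VMP/2
cout<<"mean square component "<<SUMSQ/double(2*NSAMPLE)<<" expected "<<0.5*VMP*VMP<<endl;
}
//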
void SROT(int &L,double &TEMP,double &ROTE)
{
//sets a typical rotational energy ROTE of species L
//CALC calc;
//GAS gas;
//
// IMPLICIT NONE
//
int I;
double A,B,ERM;
//
if(gas->ISPR[1][L] == 2){
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
ROTE=-log(calc->RANF)*BOLTZ*TEMP; //equation (4.8)
}
else{
A=0.5e00*gas->ISPR[1][L]-1.e00;
I=0;
while(I == 0){
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
ERM=calc->RANF*10.e00;
//there is an energy cut-off at 10 kT
B=(pow((ERM/A),A))*exp(A-ERM); //equation (4.9)
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
if(B > calc->RANF) I=1;
}
ROTE=ERM*BOLTZ*TEMP;
}
return;
}
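//
//Illustrative check (not in the original code): for a species with ISPR(1,L) rotational degrees of
//freedom the equilibrium mean of ROTE sampled by SROT should approach (ISPR(1,L)/2)*BOLTZ*TEMP
//(the 10 kT cut-off makes the sample mean very slightly lower). The helper name is hypothetical
//and it assumes the global gas and calc objects are set up and that L is a valid species index.
void DEMO_SROT_MEAN(int L,double TEMP)
{
double ROTE,SUM=0.0;
int NSAMPLE=100000;
for(int N=1;N<=NSAMPLE;N++){
SROT(L,TEMP,ROTE);
SUM=SUM+ROTE;
}
cout<<"mean rot. energy "<<SUM/double(NSAMPLE)<<" expected "<<0.5*double(gas->ISPR[1][L])*BOLTZ*TEMP<<endl;
}
//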
void SVIB(int &L,double &TEMP,int &IVIB, int &K)
{
//sets a typical vibrational state at temp. TEMP of mode K of species L
//GAS gas;
//CALC calc;
//
// IMPLICIT NONE
//
int N;
// double TEMP;
// int IVIB;
//
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
N=-log(calc->RANF)*TEMP/gas->SPVM[1][K][L]; //eqn(4.10)
//the state is truncated to an integer
IVIB=N;
}
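//
//Illustrative stand-alone sketch (names and values hypothetical, no global gas data used): the
//truncation N=int(-ln(R)*T/THETA) in SVIB produces a Boltzmann distribution over vibrational
//levels, so the fraction of samples left in level 0 should approach 1-exp(-THETA/T).
double DEMO_SVIB_GROUND_FRACTION(double T,double THETA)
{
int NSAMPLE=100000,NZERO=0;
for(int N=1;N<=NSAMPLE;N++){
double R;
do{ R=((double)rand()/(double)RAND_MAX); }while(R <= 0.0); //guard against log(0)
if(int(-log(R)*T/THETA) == 0) NZERO=NZERO+1;
}
return double(NZERO)/double(NSAMPLE); //compare with 1.0-exp(-THETA/T)
}
//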
void SELE(int &L,double &TEMP, double &ELE)
{
//sets a typical electronic energy at temp. TEMP of species L
//employs direct sampling from the Boltzmann distribution
//GAS gas;
//CALC calc;
//
// IMPLICIT NONE
//
int K,N;
double EPF,A,B;
double CTP[20];
//
//ELE electronic energy of a molecule
//EPF electronic partition function
//CTP(N) contribution of electronic level N to the electronic partition function
//
if(TEMP > 0.1){
EPF=0.e00;
for(N=1;N<=gas->NELL[L];N++)
EPF=EPF+gas->QELC[1][N][L]*exp(-gas->QELC[2][N][L]/TEMP) ;
//
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
//
A=0.0;
K=0; //becomes 1 when the energy is set
N=0; //level
while(K == 0){
N=N+1;
A=A+gas->QELC[1][N][L]*exp(-gas->QELC[2][N][L]/TEMP);
B=A/EPF;
if(calc->RANF < B){
K=1;
ELE=BOLTZ*gas->QELC[2][N][L];
}
}
}
else
ELE=0.e00;
//
}
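//
//Illustrative two-level sketch (hypothetical data, not the original QELC arrays): SELE samples a
//level with probability g_N*exp(-theta_N/T)/EPF, where EPF is the electronic partition function.
//For two levels with degeneracies G0,G1 and characteristic temperatures 0 and THETA1 the expected
//excited-state fraction is G1*exp(-THETA1/T)/(G0+G1*exp(-THETA1/T)).
double DEMO_TWO_LEVEL_EXCITED_FRACTION(double T,double G0,double G1,double THETA1)
{
double EPF=G0+G1*exp(-THETA1/T); //partition function of the hypothetical two-level system
return G1*exp(-THETA1/T)/EPF; //Boltzmann fraction in the excited level
}
//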
void CQAX(double &A,double &X,double &GAX)
{
//calculates the function Q(a,x)=Gamma(a,x)/Gamma(a)
//
// IMPLICIT NONE
double G,DT,T,PV,V;
int NSTEP,N;
//
G=tgamma(A);
//
if(X < 10.e00){ //direct integration
NSTEP=100000;
DT=X/double(NSTEP);
GAX=0.e00;
PV=0.e00;
for(N=1;N<=NSTEP;N++){
T=double(N)*DT;
V=exp(-T)*pow(T,(A-1));
GAX=GAX+(PV+V)*DT/2.e00;
PV=V;
}
GAX=1.e00-GAX/G;
}
else{ //asymptotic formula
GAX=pow(X,(A-1.e00))*exp(-X)*(1.0+(A-1.e00)/X+(A-1.e00)*(A-2.e00)/pow(X,2)+(A-1.e00)*(A-2.e00)*(A-3.e00)/pow(X,3)+(A-1.e00)*(A-2.e00)*(A-3.e00)*(A-4.e00)/pow(X,4));
GAX=GAX/G;
}
//
return;
}
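//
//Illustrative spot-check (not in the original code): for A=1 the regularised incomplete gamma
//function reduces to Q(1,x)=exp(-x), so the numerical integration in CQAX can be compared against
//that closed form. The helper name DEMO_CQAX_CHECK is hypothetical.
void DEMO_CQAX_CHECK()
{
double A=1.e00,X=2.e00,GAX;
CQAX(A,X,GAX);
cout<<"Q(1,2) from CQAX "<<GAX<<" exact "<<exp(-X)<<endl;
}
//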
//*****************************************************************************
//
void LBS(double XMA,double XMB,double &ERM)
{
//selects a Larsen-Borgnakke energy ratio using eqn (11.9)
//
double PROB,RANF;
int I,N;
//
//I is an indicator
//PROB is a probability
//ERM ratio of rotational to collision energy
//XMA degrees of freedom under selection-1
//XMB remaining degrees of freedom-1
//
I=0;
while(I == 0){
// CALL RANDOM_NUMBER(RANF)
RANF=((double)rand()/(double)RAND_MAX);
ERM=RANF;
if((XMA < 1.e-6) || (XMB < 1.e-6)){
// IF (XMA < 1.E-6.AND.XMB < 1.E-6) RETURN
//above can never occur if one mode is translational
if(XMA < 1.e-6) PROB=pow((1.e00-ERM),XMB);
if(XMB < 1.e-6) PROB=pow((1.e00-ERM),XMA);
}
else
PROB=pow(((XMA+XMB)*ERM/XMA),XMA)*pow(((XMA+XMB)*(1.e00-ERM)/XMB),XMB);
// CALL RANDOM_NUMBER(RANF)
RANF=((double)rand()/(double)RAND_MAX);
if(PROB > RANF) I=1;
}
//
return;
}
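//
//Illustrative check (not in the original code): the acceptance-rejection loop in LBS samples ERM
//from a density proportional to ERM^XMA*(1-ERM)^XMB, i.e. a Beta(XMA+1,XMB+1) distribution, so the
//sample mean should approach (XMA+1)/(XMA+XMB+2). The helper name is hypothetical.
void DEMO_LBS_MEAN(double XMA,double XMB)
{
double ERM,SUM=0.0;
int NSAMPLE=100000;
for(int N=1;N<=NSAMPLE;N++){
LBS(XMA,XMB,ERM);
SUM=SUM+ERM;
}
cout<<"mean LB ratio "<<SUM/double(NSAMPLE)<<" expected "<<(XMA+1.e00)/(XMA+XMB+2.e00)<<endl;
}
//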
void REFLECT_1D(int &N,int J,double &X)
{
//reflects molecule N and samples the surface J properties
//MOLECS molecs;
//GAS gas;
//GEOM_1D geom;
//CALC calc;
//OUTPUT output;
//
// IMPLICIT NONE
//
int L,K,M;
double A,B,VMPS,DTR,XI,DX,DY,DZ,WF;
//
//VMPS most probable velocity at the surface temperature
//DTR time remaining after molecule hits a surface
//
L=molecs->IPSP[N];
WF=1.e00;
if(geom->IWF == 1) WF=1.e00+geom->WFM*pow(X,geom->IFX);
output->CSS[0][J][L][1]=output->CSS[0][J][L][1]+1.e00;
output->CSS[1][J][L][1]=output->CSS[1][J][L][1]+WF;
output->CSS[2][J][L][1]=output->CSS[2][J][L][1]+WF*molecs->PV[1][N]*gas->SP[5][L];
output->CSS[3][J][L][1]=output->CSS[3][J][L][1]+WF*(molecs->PV[2][N]-gas->VSURF[J])*gas->SP[5][L];
output->CSS[4][J][L][1]=output->CSS[4][J][L][1]+WF*molecs->PV[3][N]*gas->SP[5][L];
A=pow(molecs->PV[1][N],2)+pow((molecs->PV[2][N]-gas->VSURF[J]),2)+pow(molecs->PV[3][N],2);
output->CSS[5][J][L][1]=output->CSS[5][J][L][1]+WF*0.5e00*gas->SP[5][L]*A;
if(gas->ISPR[1][L] > 0) output->CSS[6][J][L][1]=output->CSS[6][J][L][1]+WF*molecs->PROT[N];
if(gas->MELE > 1) output->CSS[8][J][L][1]=output->CSS[8][J][L][1]+WF*molecs->PELE[N];
if(gas->MMVM > 0){
if(gas->ISPV[L] > 0){
for(K=1;K<=gas->ISPV[L];K++)
output->CSS[7][J][L][1]=output->CSS[7][J][L][1]+WF*double(molecs->IPVIB[K][N])*BOLTZ*gas->SPVM[1][K][L];
}
}
A=pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2)+pow(molecs->PV[3][N],2);
B=fabs(molecs->PV[1][N]);
output->CSSS[1][J]=output->CSSS[1][J]+WF/B;
output->CSSS[2][J]=output->CSSS[2][J]+WF*gas->SP[5][L]/B;
output->CSSS[3][J]=output->CSSS[3][J]+WF*gas->SP[5][L]*molecs->PV[2][N]/B;
//this assumes that any flow normal to the x direction is in the y direction
output->CSSS[4][J]=output->CSSS[4][J]+WF*gas->SP[5][L]*A/B;
if(gas->ISPR[1][L] > 0){
output->CSSS[5][J]=output->CSSS[5][J]+WF*molecs->PROT[N]/B;
output->CSSS[6][J]=output->CSSS[6][J]+WF*gas->ISPR[1][L]/B;
}
//
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
if(gas->FSPEC[J] > calc->RANF){ //specular reflection
X=2.e00*geom->XB[J]-X;
molecs->PV[1][N]=-molecs->PV[1][N];
DTR=(X-geom->XB[J])/molecs->PV[1][N];
}
else{ //diffuse reflection
VMPS=sqrt(2.e00*BOLTZ*gas->TSURF[J]/gas->SP[5][L]);
DTR=(geom->XB[J]-molecs->PX[1][N])/molecs->PV[1][N];
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
molecs->PV[1][N]=sqrt(-log(calc->RANF))*VMPS;
if(J == 2) molecs->PV[1][N]=-molecs->PV[1][N];
RVELC(molecs->PV[2][N],molecs->PV[3][N],VMPS);
molecs->PV[2][N]=molecs->PV[2][N]+gas->VSURF[J];
if(gas->ISPR[1][L] > 0) SROT(L,gas->TSURF[J],molecs->PROT[N]);
if(gas->MMVM > 0){
for(K=1;K<=gas->ISPV[L];K++)
SVIB(L,gas->TSURF[J],molecs->IPVIB[K][N],K);
}
if(gas->MELE > 1) SELE(L,gas->TSURF[J],molecs->PELE[N]);
}
//
output->CSS[2][J][L][2]=output->CSS[2][J][L][2]-WF*molecs->PV[1][N]*gas->SP[5][L];
output->CSS[3][J][L][2]=output->CSS[3][J][L][2]-WF*(molecs->PV[2][N]-gas->VSURF[J])*gas->SP[5][L];
output->CSS[4][J][L][2]=output->CSS[4][J][L][2]-WF*molecs->PV[3][N]*gas->SP[5][L];
A=pow(molecs->PV[1][N],2)+pow((molecs->PV[2][N]-gas->VSURF[J]),2)+pow(molecs->PV[3][N],2);
output->CSS[5][J][L][2]=output->CSS[5][J][L][2]-WF*0.5e00*gas->SP[5][L]*A;
if(gas->ISPR[1][L] > 0) output->CSS[6][J][L][2]=output->CSS[6][J][L][2]-WF*molecs->PROT[N];
if(gas->MELE > 1) output->CSS[8][J][L][2]=output->CSS[8][J][L][2]-WF*molecs->PELE[N];
if(gas->MMVM > 0){
if(gas->ISPV[L] > 0){
for(K=1;K<=gas->ISPV[L];K++)
output->CSS[7][J][L][2]=output->CSS[7][J][L][2]-WF*double(molecs->IPVIB[K][N])*BOLTZ*gas->SPVM[1][K][L];
}
}
A=pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2)+pow(molecs->PV[3][N],2);
B=fabs(molecs->PV[1][N]);
output->CSSS[1][J]=output->CSSS[1][J]+WF/B;
output->CSSS[2][J]=output->CSSS[2][J]+WF*gas->SP[5][L]/B;
output->CSSS[3][J]=output->CSSS[3][J]+WF*gas->SP[5][L]*molecs->PV[2][N]/B;
//this assumes that any flow normal to the x direction is in the y direction
output->CSSS[4][J]=output->CSSS[4][J]+WF*gas->SP[5][L]*A/B;
if(gas->ISPR[1][L] > 0){
output->CSSS[5][J]=output->CSSS[5][J]+WF*molecs->PROT[N]/B; //accumulate WF*PROT(N)/B, as in the pre-reflection sample above
output->CSSS[6][J]=output->CSSS[6][J]+WF*gas->ISPR[1][L]/B;
}
//
XI=geom->XB[J];
DX=DTR*molecs->PV[1][N];
DZ=0.e00;
if(geom->IFX > 0) DY=DTR*molecs->PV[2][N];
if(geom->IFX == 2) DZ=DTR*molecs->PV[3][N];
if(geom->IFX == 0) X=XI+DX;
if(geom->IFX > 0) AIFX(XI,DX,DY,DZ,X,molecs->PV[1][N],molecs->PV[2][N],molecs->PV[3][N]);
//
return;
}
void RBC(double &XI, double &DX, double &DY,double &DZ, double &R,double &S)
{
//calculates the trajectory fraction S from a point at radius XI with
//note that the axis is in the y direction
//--displacements DX, DY, and DZ to a possible intersection with a
//--surface of radius R, IFX=1, 2 for cylindrical, spherical geometry
//MOLECS molecs;
//GAS gas;
//GEOM_1D geom;
//CALC calc;
//OUTPUT output;
//
// IMPLICIT NONE
//
double A,B,C,DD,S1,S2;
//
DD=DX*DX+DZ*DZ;
if(geom->IFX == 2) DD=DD+DY*DY;
B=XI*DX/DD;
C=(XI*XI-R*R)/DD;
A=B*B-C;
if(A >= 0.e00){
//find the least positive solution to the quadratic
A=sqrt(A);
S1=-B+A;
S2=-B-A;
if(S2 < 0.e00){
if(S1 > 0.e00)
S=S1;
else
S=2.e00;
}
else if(S1 < S2)
S=S1;
else
S=S2;
}
else
S=2.e00;
//setting S to 2 indicates that there is no intersection
return;
//
}
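//
//Worked example (illustrative only; assumes the global geom object exists with IFX=1): a molecule
//at radius XI=1 moving radially outward with DX=1, DY=DZ=0 meets a cylinder of radius R=1.5 at the
//trajectory fraction S=0.5, which is the least positive root of the quadratic solved in RBC.
void DEMO_RBC_CHECK()
{
double XI=1.e00,DX=1.e00,DY=0.e00,DZ=0.e00,R=1.5e00,S;
RBC(XI,DX,DY,DZ,R,S);
cout<<"trajectory fraction "<<S<<" expected 0.5"<<endl;
}
//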
void AIFX(double &XI,double &DX, double &DY, double &DZ, double &X, double &U, double &V, double &W)
{
//
//calculates the new radius and realigns the velocity components in
//--cylindrical and spherical flows
//MOLECS molecs;
//GAS gas;
//GEOM_1D geom;
//CALC calc;
//OUTPUT output;
//
// IMPLICIT NONE
//
//INTEGER ::
double A,B,C,DR,VR,S;
//
if(geom->IFX == 1){
DR=DZ;
VR=W;
}
else if(geom->IFX == 2){
DR=sqrt(DY*DY+DZ*DZ);
VR=sqrt(V*V+W*W);
}
A=XI+DX;
X=sqrt(A*A+DR*DR);
S=DR/X;
C=A/X;
B=U;
U=B*C+VR*S;
W=-B*S+VR*C;
if(geom->IFX == 2){
VR=W;
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
A=DPI*calc->RANF;
V=VR*sin(A);
W=VR*cos(A);
}
//
return;
//
}
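//
//Worked example (illustrative only; assumes the global geom object exists with IFX=1): starting at
//XI=3 with displacements DX=0, DZ=4 the new radius is the 3-4-5 hypotenuse X=5, and the x and
//circumferential velocity components are rotated so that their magnitude is preserved.
void DEMO_AIFX_CHECK()
{
double XI=3.e00,DX=0.e00,DY=0.e00,DZ=4.e00,X,U=1.e00,V=0.e00,W=0.e00;
AIFX(XI,DX,DY,DZ,X,U,V,W);
cout<<"new radius "<<X<<" (expected 5), rotated U,W = "<<U<<" , "<<W<<endl; //U=0.6, W=-0.8
}
//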
void REMOVE_MOL(int &N)
{
//removes molecule N and replaces it with the last molecule (number NM)
//MOLECS molecs;
//CALC calc;
//GEOM_1D geom;
//GAS gas;
// IMPLICIT NONE
//
int NC,M,K;
//N the molecule number
//M,K working integer
//
if(N != molecs->NM){
for(M=1;M<=calc->NCLASS;M++)
molecs->PX[M][N]=molecs->PX[M][molecs->NM];
for(M=1;M<=3;M++)
molecs->PV[M][N]=molecs->PV[M][molecs->NM];
if(gas->MMRM > 0) molecs->PROT[N]=molecs->PROT[molecs->NM];
molecs->IPCELL[N]=fabs(molecs->IPCELL[molecs->NM]);
molecs->IPSP[N]=molecs->IPSP[molecs->NM];
molecs->IPCP[N]=molecs->IPCP[molecs->NM];
if(gas->MMVM > 0){
for(M=1;M<=gas->MMVM;M++)
molecs->IPVIB[M][N]=molecs->IPVIB[M][molecs->NM];
}
if(gas->MELE > 1) molecs->PELE[N]=molecs->PELE[molecs->NM];
molecs->PTIM[N]=molecs->PTIM[molecs->NM];
}
molecs->NM=molecs->NM-1;
//
return;
//
}
void INDEX_MOLS()
{
//index the molecules to the collision cells
//MOLECS molecs;
//CALC calc;
//GEOM_1D geom;
// IMPLICIT NONE
//
int N,M,K;
//
//N,M,K working integer
//
for(N=1;N<=geom->NCCELLS;N++)
geom->ICCELL[2][N]=0;
//
if(molecs->NM != 0){
for(N=1;N<=molecs->NM;N++){
M=molecs->IPCELL[N];
geom->ICCELL[2][M]=geom->ICCELL[2][M]+1;
}
//
M=0;
for(N=1;N<=geom->NCCELLS;N++){
geom->ICCELL[1][N]=M;
M=M+geom->ICCELL[2][N];
geom->ICCELL[2][N]=0;
}
//
for(N=1;N<=molecs->NM;N++){
M=molecs->IPCELL[N];
geom->ICCELL[2][M]=geom->ICCELL[2][M]+1;
K=geom->ICCELL[1][M]+geom->ICCELL[2][M];
molecs->ICREF[K]=N;
}
//cin.get();
//
}
return;
}
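//
//Illustrative sketch of the same two-pass counting-sort pattern used by INDEX_MOLS, on plain local
//arrays (all names hypothetical; arrays are 1-based, COUNT and START sized NCELL+1, CELLOF and
//XREF sized NITEM+1): count the entries per cell, turn the counts into starting offsets, then
//scatter the item numbers into the cross-reference array grouped by cell.
void DEMO_COUNTING_INDEX(int NITEM,int NCELL,const int *CELLOF,int *COUNT,int *START,int *XREF)
{
for(int C=1;C<=NCELL;C++) COUNT[C]=0;
for(int N=1;N<=NITEM;N++) COUNT[CELLOF[N]]=COUNT[CELLOF[N]]+1; //pass 1: occupancy of each cell
int M=0;
for(int C=1;C<=NCELL;C++){ START[C]=M; M=M+COUNT[C]; COUNT[C]=0; } //exclusive prefix sums
for(int N=1;N<=NITEM;N++){ //pass 2: scatter, so XREF lists item numbers cell by cell
int C=CELLOF[N];
COUNT[C]=COUNT[C]+1;
XREF[START[C]+COUNT[C]]=N;
}
}
//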
void SAMPLE_FLOW()
{
//sample the flow properties
//MOLECS molecs;
//CALC calc;
//GEOM_1D geom;
//GAS gas;
//OUTPUT output;
//
// IMPLICIT NONE
//
int NC,NCC,LS,N,M,K,L,I,KV;
double A,TE,TT,WF;
//
//NC the sampling cell number
//NCC the collision cell number
//LS the species code
//N,M,K working integers
//TE total translational energy
//
output->NSAMP=output->NSAMP+1;
cout<<"Sample \t"<<output->NSAMP<<endl<<endl;
//WRITE (9,*) NM,'Mols. at sample',NSAMP
file_9<<molecs->NM<<" Mols. at sample "<<output->NSAMP<<endl;
//
for(N=1;N<=molecs->NM;N++){
NCC=molecs->IPCELL[N];
NC=geom->ICCELL[3][NCC];
WF=1.e00;
if(geom->IWF == 1) WF=1.e00+geom->WFM*pow(molecs->PX[1][N],geom->IFX);
if((NC > 0) && (NC <= geom->NCELLS)){
if(gas->MSP > 1)
LS=fabs(molecs->IPSP[N]);
else
LS=1;
output->CS[0][NC][LS]=output->CS[0][NC][LS]+1.e00;
output->CS[1][NC][LS]=output->CS[1][NC][LS]+WF;
for(M=1;M<=3;M++){
output->CS[M+1][NC][LS]=output->CS[M+1][NC][LS]+WF*molecs->PV[M][N];
output->CS[M+4][NC][LS]=output->CS[M+4][NC][LS]+WF*pow(molecs->PV[M][N],2);
}
if(gas->MMRM > 0) output->CS[8][NC][LS]=output->CS[8][NC][LS]+WF*molecs->PROT[N];
if(gas->MELE > 1) output->CS[9][NC][LS]=output->CS[9][NC][LS]+WF*molecs->PELE[N];
if(gas->MMVM > 0){
if(gas->ISPV[LS] > 0){
for(K=1;K<=gas->ISPV[LS];K++)
output->CS[K+9][NC][LS]=output->CS[K+9][NC][LS]+WF*double(molecs->IPVIB[K][N]);
}
}
}
else{
cout<<"Illegal sampling cell "<<NC<<" "<<NCC<<" for MOL "<<N<<" at "<<molecs->PX[1][N]<<endl;
return;
}
}
//
if(calc->FTIME > 0.5e00*calc->DTM) calc->TSAMP=calc->TSAMP+calc->DTSAMP;
//
return;
}
void ADAPT_CELLS_1D()
{
//adapt the sampling cells through the splitting of the divisions into successive levels
//the collision cells are divisions of the sampling cells
//MOLECS molecs;
//GAS gas;
//GEOM_1D geom;
//CALC calc;
//OUTPUT output;
//
// IMPLICIT NONE
//
int M,N,L,K,KK,I,J,JJ,MSEG,NSEG,NSEG1,NSEG2,MLEVEL;
double A,B,DDE,DCRIT;
int *KDIV,*NC;
int **ISD;
double *XMIN,*XMAX,*DRAT;
// INTEGER, ALLOCATABLE, DIMENSION(:) :: KDIV,NC
// INTEGER, ALLOCATABLE, DIMENSION(:,:) :: ISD
// REAL(KIND=8), ALLOCATABLE, DIMENSION(:) :: XMIN,XMAX,DRAT
//
//DCRIT the number density ratio that causes a cell to be subdivided
//KDIV(N) the number of divisions/subdivisions (cells or further subdivisions) at level N
//DRAT(N) the contribution to the density ratio of element N
//NC(I) the number of sampling cells at level I
//DDE the width of an element
//MSEG the maximum number of segments (a segment is the size of the smallest subdivision)
//NSEG1 the (first segment-1) in the subdivision
//NSEG2 the final segment in the subdivision
//ISD(N,M) 0,1 for cell,subdivided for level N subdivision
//MLEVEL the maximum desired level ILEVEL of subdivision (cells are proportional to 2**ILEVEL)
//
DCRIT=1.5e00; //may be altered
MLEVEL=2; //may be altered
//
//determine the level to which the divisions are to be subdivided
//
A=1.e00;
for(N=1;N<=geom->NCELLS;N++)
if(output->VAR[3][N]/gas->FND[1] > A) A=output->VAR[3][N]/gas->FND[1];
geom->ILEVEL=0;
while(A > DCRIT){
geom->ILEVEL=geom->ILEVEL+1;
A=A/2.e00;
}
if(geom->ILEVEL > MLEVEL) geom->ILEVEL=MLEVEL;
//WRITE (9,*) 'ILEVEL =',ILEVEL
file_9<<"ILEVEL = "<<geom->ILEVEL<<endl;
NSEG=pow(2,geom->ILEVEL);
MSEG=geom->NDIV*NSEG;
//
KDIV = new int[geom->ILEVEL+1];
DRAT = new double[MSEG+1];
NC = new int[geom->ILEVEL+1];
ISD = new int*[geom->ILEVEL+1];
for(int i =0; i< (geom->ILEVEL+1); ++i)
ISD[i] = new int[MSEG+1];
// ALLOCATE (KDIV(0:ILEVEL),DRAT(MSEG),NC(0:ILEVEL),ISD(0:ILEVEL,MSEG),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR KDIV ARRAY',ERROR
// ENDIF
//
DDE=(geom->XB[2]-geom->XB[1])/double(MSEG);
for(N=1;N<=MSEG;N++){
A=geom->XB[1]+(double(N)-0.5e00)*DDE;
FIND_CELL_1D(A,M,L);
DRAT[N]=output->VAR[3][L]/(gas->FND[1]*double(NSEG));
}
//
//calculate the number of subdivisions at the various levels of subdivision
//and the number of sampling cells at each level
for(I=0;I<=geom->ILEVEL;I++){ //zero the array elements (assigning 0 to the pointers, as in the Fortran KDIV=0 and NC=0, would discard the allocations)
KDIV[I]=0;
NC[I]=0;
}
//
for(N=1;N<=geom->NDIV;N++){ //divisions
for(I=0;I<=geom->ILEVEL;I++) //clear the subdivision flags for this division
for(M=1;M<=MSEG;M++)
ISD[I][M]=0;
ISD[0][1]=1;
KDIV[0]=KDIV[0]+1;
// WRITE (9,*) 'DIVISION',N
for(I=0;I<=geom->ILEVEL;I++){ //level of subdivision
// WRITE (9,*) 'LEVEL',I
J=pow(2,I); //number of possible subdivisions at this level
JJ=NSEG/J; //number of segments in a subdivision
for(M=1;M<=J;M++){
// WRITE (9,*) 'SUBDIVISION',M
if(ISD[I][M] == 1){
NSEG1=(N-1)*NSEG+(M-1)*JJ+1;
NSEG2=NSEG1+JJ-1;
A=0.e00;
// WRITE (9,*) 'NSEG RANGE',NSEG1,NSEG2
for(L=NSEG1;L<=NSEG2;L++)
A=A+DRAT[L];
// WRITE (9,*) 'DENS CONTRIB',A
if(A < DCRIT){
NC[I]=NC[I]+1;
// WRITE (9,*) 'LEVEL',I,' CELLS TO', NC(I)
}
else{
KDIV[I+1]=KDIV[I+1]+2;
// WRITE (9,*) 'LEVEL',I+1,' SUBDIVISIONS TO',KDIV(I+1)
for(L=NSEG1-(N-1)*NSEG;L<=NSEG2-(N-1)*NSEG;L++)
ISD[I+1][L]=1;
}
}
}
}
}
//
//WRITE (9,*) 'KDIV',KDIV
file_9<<"KDIV "<<KDIV<<endl;
//
//WRITE (9,*) 'NC',NC
file_9<< "NC "<<NC<<endl;
//cin.get(); //debugging pause, not present in the Fortran original
//WRITE (9,*) 'Number of divisions',NDIV
file_9<<"Number of divisions "<<geom->NDIV<<endl;
A=0;
geom->NCELLS=0;
for(N=0;N<=geom->ILEVEL;N++){
A=A+double(NC[N])/(pow(2.e00,N));
geom->NCELLS=geom->NCELLS+NC[N];
}
//WRITE (9,*) 'Total divisions from sampling cells',A
//WRITE (9,*) 'Adapted sampling cells',NCELLS
file_9<< "Total divisions from sampling cells "<<A<<endl;
file_9<< "Adapted sampling cells "<<geom->NCELLS<<endl;
geom->NCCELLS=geom->NCELLS*geom->NCIS;
//WRITE (9,*) 'Adapted collision cells',NCCELLS
file_9<< "Adapted collision cells "<<geom->NCCELLS<<endl;
//
for (int i = 0; i < geom->ILEVEL+1; i++) {
cudaFree(geom->JDIV[i]); //delete [] geom->JDIV[i];
}
cudaFree(geom->JDIV); //delete [] geom->JDIV; // <- because they won't exist anymore after this
for (int i = 0; i < 5; i++) {
cudaFree(geom->CELL[i]); //delete [] geom->CELL[i];
}
cudaFree(geom->CELL); //delete [] geom->CELL; // <- because they won't exist anymore after this
cudaFree(geom->ICELL); //delete[] geom->ICELL;
for (int i = 0; i < 6; i++) {
cudaFree(geom->CCELL[i]); //delete [] geom->CCELL[i];
}
cudaFree(geom->CCELL); //delete [] geom->CCELL; // <- because they won't exist anymore after this
for (int i = 0; i < 4; i++) {
cudaFree(geom->ICCELL[i]); //delete [] geom->ICCELL[i];
}
cudaFree(geom->ICCELL); //delete [] geom->ICCELL; // <- because they won't exist anymore after this
cudaFree(output->COLLS); //delete[] output->COLLS;
cudaFree(output->WCOLLS); //delete[] output->WCOLLS;
cudaFree(output->CLSEP); //delete[] output->CLSEP;
for (int i = 0; i < 24; i++) {
cudaFree(output->VAR[i]); //delete [] output->VAR[i];
}
cudaFree(output->VAR); //delete [] output->VAR; // <- because they won't exist anymore after this
for(int i = 0; i < 13; i++)
{
for(int j = 0; j < geom->NCELLS+1; j++)
{
cudaFree(output->VARSP[i][j]); //delete [] output->VARSP[i][j];
}
cudaFree(output->VARSP[i]); //delete [] output->VARSP[i];
}
cudaFree(output->VARSP); //delete [] output->VARSP;
for(int i = 0; i < (10+gas->MSP); i++)
{
for(int j = 0; j < geom->NCELLS+1; j++)
{
cudaFree(output->CS[i][j]); //delete [] output->CS[i][j];
}
cudaFree(output->CS[i]); //delete [] output->CS[i];
}
cudaFree(output->CS); //delete [] output->CS;
/*DEALLOCATE (JDIV,CELL,ICELL,CCELL,ICCELL,COLLS,WCOLLS,CLSEP,VAR,VARSP,CS,STAT=ERROR)
IF (ERROR /= 0) THEN
WRITE (*,*)'PROGRAM COULD NOT DEALLOCATE ARRAYS IN ADAPT',ERROR
END IF*/
//
for(N=0;N<=geom->ILEVEL;N++)
if(KDIV[N] > geom->MDIV) geom->MDIV=KDIV[N];
//
geom->i_allocate(geom->ILEVEL+1,geom->MDIV+1, geom->JDIV); //second dimension MDIV+1, matching the earlier allocation of JDIV
// ALLOCATE (JDIV(0:ILEVEL,MDIV),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR JDIV ARRAY IN ADAPT',ERROR
// ENDIF
//
geom->d_allocate(5,geom->NCELLS+1, geom->CELL);
geom->i_allocate(geom->NCELLS+1, geom->ICELL);
geom->d_allocate(6, geom->NCCELLS+1, geom->CCELL);
geom->i_allocate(4, geom->NCCELLS+1,geom->ICCELL);
XMIN= new double[geom->NCCELLS+1];
XMAX = new double[geom->NCCELLS+1];
//
// ALLOCATE (CELL(4,NCELLS),ICELL(NCELLS),CCELL(5,NCCELLS),ICCELL(3,NCCELLS),XMIN(NCCELLS),XMAX(NCCELLS),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR CELL ARRAYS IN ADAPT',ERROR
// ENDIF
//
output->d_allocate(geom->NCELLS+1,output->COLLS);
output->d_allocate(geom->NCELLS+1, output->WCOLLS);
output->d_allocate(geom->NCELLS+1, output->CLSEP);
output->d_allocate(24, geom->NCELLS+1, output->VAR);
output->d_allocate(13,geom->NCELLS+1,gas->MSP+1, output->VARSP);
output->d_allocate(10+gas->MSP+1,geom->NCELLS+1,gas->MSP+1,output->CS);
// ALLOCATE (COLLS(NCELLS),WCOLLS(NCELLS),CLSEP(NCELLS),VAR(23,NCELLS),VARSP(0:12,NCELLS,MSP),CS(0:9+MSP,NCELLS,MSP),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR SAMPLING ARRAYS IN ADAPT',ERROR
// ENDIF
//
geom->NCCELLS=0;
geom->NCELLS=0;
//
//set the JDIV arrays and the sampling cells at the various levels of subdivision
for(I=0;I<=geom->ILEVEL;I++){ //zero the elements, not the pointers
KDIV[I]=0;
for(M=1;M<=geom->MDIV;M++)
geom->JDIV[I][M]=0;
}
//
for(N=1;N<=geom->NDIV;N++){ //divisions
for(I=0;I<=geom->ILEVEL;I++) //clear the subdivision flags for this division
for(M=1;M<=MSEG;M++)
ISD[I][M]=0;
ISD[0][1]=1;
KDIV[0]=KDIV[0]+1;
for(I=0;I<=geom->ILEVEL;I++){ //level of subdivision
J=pow(2,I); //number of possible subdivisions at this level
JJ=NSEG/J; //number of segments in a subdivision
for(M=1;M<=J;M++){
if(ISD[I][M] == 1){
NSEG1=(N-1)*NSEG+(M-1)*JJ+1;
NSEG2=NSEG1+JJ-1;
A=0.e00;
for(L=NSEG1;L<=NSEG2;L++)
A=A+DRAT[L];
if(A < DCRIT){
geom->NCELLS=geom->NCELLS+1;
output->VAR[11][geom->NCELLS]=gas->FTMP[1];
XMIN[geom->NCELLS]=geom->XB[1]+double(NSEG1-1)*DDE;
XMAX[geom->NCELLS]=XMIN[geom->NCELLS]+double(NSEG2-NSEG1+1)*DDE;
//WRITE (9,*) NCELLS,I,' XMIN,XMAX',XMIN(NCELLS),XMAX(NCELLS)
file_9<< geom->NCELLS<<" "<<I<<" XMIN,XMAX "<<XMIN[geom->NCELLS]<<" , "<<XMAX[geom->NCELLS]<<endl;
geom->JDIV[I][KDIV[I]-(J-M)]=-geom->NCELLS;
// WRITE (9,*) 'JDIV(',I,',',KDIV(I)-(J-M),')=',-NCELLS
}
else{
geom->JDIV[I][KDIV[I]-(J-M)]=KDIV[I+1];
// WRITE (9,*) 'JDIV(',I,',',KDIV(I)-(J-M),')=',KDIV(I+1)
KDIV[I+1]=KDIV[I+1]+2;
for(L=NSEG1-(N-1)*NSEG;L<=NSEG2-(N-1)*NSEG;L++)
ISD[I+1][L]=1;
}
}
}
}
}
//
//set the other quantities associated with the sampling cells and the collision cells
//
geom->NCCELLS=0;
for(N=1;N<=geom->NCELLS;N++){
geom->CELL[1][N]=(XMIN[N]+XMAX[N])/2.e00;
geom->CELL[2][N]=XMIN[N];
geom->CELL[3][N]=XMAX[N];
if(geom->IFX == 0) geom->CELL[4][N]=XMAX[N]-XMIN[N]; //calculation assumes unit cross-section
if(geom->IFX == 1) geom->CELL[4][N]=PI*(pow(XMAX[N],2)-pow(XMIN[N],2));
if(geom->IFX == 2) geom->CELL[4][N]=1.33333333333333333333e00*PI*(pow(XMAX[N],3)-pow(XMIN[N],3));
geom->ICELL[N]=geom->NCCELLS;
for(M=1;M<=geom->NCIS;M++){
geom->NCCELLS=geom->NCCELLS+1;
geom->ICCELL[3][geom->NCCELLS]=N;
geom->CCELL[1][geom->NCCELLS]=geom->CELL[4][N]/double(geom->NCIS);
geom->CCELL[3][geom->NCCELLS]=calc->DTM/2.e00;
geom->CCELL[4][geom->NCCELLS]=2.e00*gas->VMPM*gas->SPM[2][1][1];
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
geom->CCELL[2][geom->NCCELLS]=calc->RANF;
geom->CCELL[5][geom->NCCELLS]=calc->FTIME;
}
}
//
//assign the molecules to the cells
//
for(N=1;N<=molecs->NM;N++){
FIND_CELL_1D(molecs->PX[1][N],molecs->IPCELL[N],JJ);
M=molecs->IPCELL[N];
}
//
//deallocate the local variables
for (int i = 0; i < geom->ILEVEL+1; i++) {
delete [] ISD[i];
}
delete [] ISD;
delete [] NC;
delete[] KDIV;
delete [] XMAX;
delete [] XMIN;
delete [] DRAT;
/*DEALLOCATE (KDIV,NC,ISD,XMIN,XMAX,DRAT,STAT=ERROR)
IF (ERROR /= 0) THEN
WRITE (*,*)'PROGRAM COULD NOT DEALLOCATE LOCAL ARRAYS IN ADAPT',ERROR
END IF*/
//
return;
}
void EXTEND_MNM(double FAC)
{ //
//the maximum number of molecules is increased by a specified factor
//the existing molecules are copied TO disk storage
//MOLECS molecs;
//CALC calc;
//GAS gas;
//
// IMPLICIT NONE
//
int M,N,MNMN;
fstream file_7;
// REAL :: FAC
//
//M,N working integers
//MNMN extended value of MNM
//FAC the factor for the extension
MNMN=FAC*molecs->MNM;
cout<< "Maximum number of molecules is to be extended from "<<molecs->MNM<<" to "<<MNMN<<endl;
cout<< "( if the additional memory is available //// )"<<endl;
file_7.open("EXTMOLS.SCR", ios::binary | ios::out);
if(file_7.is_open()){
cout<<"EXTMOLS.SCR is opened"<<endl;
}
else{
cout<<"EXTMOLS.SCR not opened"<<endl;
}
cout<<"Start write to disk storage"<<endl;
//OPEN (7,FILE='EXTMOLS.SCR',FORM='BINARY')
//WRITE (*,*) 'Start write to disk storage'
for(N=1;N<=molecs->MNM;N++){
if(gas->MMVM > 0){
file_7<<molecs->PX[calc->NCLASS][N]<<endl<<molecs->PTIM[N]<<endl<<molecs->PROT[N]<<endl;
for(M=1;M<=3;M++)
file_7<<molecs->PV[M][N]<<endl;
file_7<<molecs->IPSP[N]<<endl<<molecs->IPCELL[N]<<endl<<molecs->ICREF[N]<<endl<<molecs->IPCP[N]<<endl;
for(M=1;M<=gas->MMVM;M++)
file_7<<molecs->IPVIB[M][N]<<endl;
file_7<<molecs->PELE[N]<<endl;//WRITE (7) PX(NCLASS,N),PTIM(N),PROT(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),(IPVIB(M,N),M=1,MMVM),PELE(N)
}
else{
if(gas->MMRM > 0){
file_7<<molecs->PX[calc->NCLASS][N]<<endl<<molecs->PTIM[N]<<endl<<molecs->PROT[N]<<endl;
for(M=1;M<=3;M++)
file_7<<molecs->PV[M][N]<<endl;
file_7<<molecs->IPSP[N]<<endl<<molecs->IPCELL[N]<<endl<<molecs->ICREF[N]<<endl<<molecs->IPCP[N]<<endl<<molecs->PELE[N]<<endl;//WRITE (7) PX(NCLASS,N),PTIM(N),PROT(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),PELE(N)
}
else{
file_7<<molecs->PX[calc->NCLASS][N]<<endl<<molecs->PTIM[N]<<endl;
for(M=1;M<=3;M++)
file_7<<molecs->PV[M][N]<<endl;
file_7<<molecs->IPSP[N]<<endl<<molecs->IPCELL[N]<<endl<<molecs->ICREF[N]<<endl<<molecs->IPCP[N]<<endl<<molecs->PELE[N]<<endl;//WRITE (7) PX(NCLASS,N),PTIM(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),PELE(N)
}
}
}
cout<<"Disk write completed"<<endl;
// WRITE (*,*) 'Disk write completed'
// CLOSE (7)
file_7.close();
if(gas->MMVM > 0){
for(int i=0;i<calc->NCLASS+1;i++){
cudaFree(molecs->PX[i]); //delete [] molecs->PX[i];
}
cudaFree(molecs->PX); //delete [] molecs->PX;
cudaFree(molecs->PTIM); //delete [] molecs->PTIM;
cudaFree(molecs->PROT);
for(int i=0;i<4;i++){
cudaFree(molecs->PV[i]); //delete [] molecs->PV[i];
}
cudaFree(molecs->PV); //delete [] molecs->PV;
cudaFree(molecs->IPSP);
cudaFree(molecs->IPCELL);
cudaFree(molecs->ICREF);
cudaFree(molecs->IPCP);
cudaFree(molecs->PELE);
for(int i=0;i<gas->MMVM;i++){
cudaFree(molecs->IPVIB[i]); //delete [] molecs->IPVIB[i];
}
cudaFree(molecs->IPVIB); //delete molecs->IPVIB;
// for(int i=0;i<calc->NCLASS+1;i++){
// delete [] molecs->PX[i];
// }
// delete [] molecs->PX;
// delete [] molecs->PTIM;
// delete [] molecs->PROT;
// for(int i=0;i<4;i++){
// delete [] molecs->PV[i];
// }
// delete [] molecs->PV;
// delete [] molecs->IPSP;
// delete [] molecs->IPCELL;
// delete [] molecs->ICREF;
// delete [] molecs->IPCP;
// delete [] molecs->PELE;
// for(int i=0;i<gas->MMVM;i++){
// delete [] molecs->IPVIB[i];
// }
// delete molecs->IPVIB;
//DEALLOCATE (PX,PTIM,PROT,PV,IPSP,IPCELL,ICREF,IPCP,IPVIB,PELE,STAT=ERROR)
}
else{
if(gas->MMRM > 0){
for(int i=0;i<calc->NCLASS+1;i++){
cudaFree(molecs->PX[i]); //delete [] molecs->PX[i];
}
cudaFree(molecs->PX); //delete [] molecs->PX;
cudaFree(molecs->PTIM); //delete [] molecs->PTIM;
cudaFree(molecs->PROT);
for(int i=0;i<4;i++){
cudaFree(molecs->PV[i]); //delete [] molecs->PV[i];
}
cudaFree(molecs->PV); //delete [] molecs->PV;
cudaFree(molecs->IPSP);
cudaFree(molecs->IPCELL);
cudaFree(molecs->ICREF);
cudaFree(molecs->IPCP);
cudaFree(molecs->PELE);
// delete [] molecs->IPSP;
// delete [] molecs->IPCELL;
// delete [] molecs->ICREF;
// delete [] molecs->IPCP;
// delete [] molecs->PELE;//DEALLOCATE (PX,PTIM,PV,IPSP,IPCELL,ICREF,IPCP,PELE,STAT=ERROR)
// for(int i=0;i<calc->NCLASS+1;i++){
// delete [] molecs->PX[i];
// }
// delete [] molecs->PX;
// delete [] molecs->PTIM;
// delete [] molecs->PROT;
// for(int i=0;i<4;i++){
// delete [] molecs->PV[i];
// }
// delete [] molecs->PV;
// delete [] molecs->IPSP;
// delete [] molecs->IPCELL;
// delete [] molecs->ICREF;
// delete [] molecs->IPCP;
// delete [] molecs->PELE;
//DEALLOCATE (PX,PTIM,PROT,PV,IPSP,IPCELL,ICREF,IPCP,PELE,STAT=ERROR)
}
else{
for(int i=0;i<calc->NCLASS+1;i++){
cudaFree(molecs->PX[i]); //delete [] molecs->PX[i];
}
cudaFree(molecs->PX); //delete [] molecs->PX;
cudaFree(molecs->PTIM); //delete [] molecs->PTIM;
for(int i=0;i<4;i++){
cudaFree(molecs->PV[i]); //delete [] molecs->PV[i];
}
cudaFree(molecs->PV); //delete [] molecs->PV;
cudaFree(molecs->IPSP);
cudaFree(molecs->IPCELL);
cudaFree(molecs->ICREF);
cudaFree(molecs->IPCP);
cudaFree(molecs->PELE);
// delete [] molecs->IPSP;
// delete [] molecs->IPCELL;
// delete [] molecs->ICREF;
// delete [] molecs->IPCP;
// delete [] molecs->PELE;//DEALLOCATE (PX,PTIM,PV,IPSP,IPCELL,ICREF,IPCP,PELE,STAT=ERROR)
}
}
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT DEALLOCATE MOLECULES',ERROR
// ! STOP
// END IF
// !
if(gas->MMVM > 0){
molecs->d_allocate(calc->NCLASS+1,MNMN+1,molecs->PX);
molecs->d_allocate(MNMN+1,molecs->PTIM);
molecs->d_allocate(MNMN+1,molecs->PROT);
molecs->d_allocate(4,MNMN+1,molecs->PV);
molecs->i_allocate(MNMN+1,molecs->IPSP);
molecs->i_allocate(MNMN+1,molecs->IPCELL);
molecs->i_allocate(MNMN+1,molecs->ICREF);
molecs->i_allocate(MNMN+1,molecs->IPCP);
molecs->i_allocate(gas->MMVM+1,MNMN+1,molecs->IPVIB);
molecs->d_allocate(MNMN+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNMN),PTIM(MNMN),PROT(MNMN),PV(3,MNMN),IPSP(MNMN),IPCELL(MNMN),ICREF(MNMN),IPCP(MNMN),IPVIB(MMVM,MNMN),PELE(MNMN),STAT=ERROR)
}
else{
if(gas->MMRM > 0){
molecs->d_allocate(calc->NCLASS+1,MNMN+1,molecs->PX);
molecs->d_allocate(MNMN+1,molecs->PTIM);
molecs->d_allocate(MNMN+1,molecs->PROT);
molecs->d_allocate(4,MNMN+1,molecs->PV);
molecs->i_allocate(MNMN+1,molecs->IPSP);
molecs->i_allocate(MNMN+1,molecs->IPCELL);
molecs->i_allocate(MNMN+1,molecs->ICREF);
molecs->i_allocate(MNMN+1,molecs->IPCP);
molecs->d_allocate(MNMN+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNMN),PTIM(MNMN),PROT(MNMN),PV(3,MNMN),IPSP(MNMN),IPCELL(MNMN),ICREF(MNMN),IPCP(MNMN),PELE(MNMN),STAT=ERROR)
}
else{
molecs->d_allocate(calc->NCLASS+1,MNMN+1,molecs->PX);
molecs->d_allocate(MNMN+1,molecs->PTIM);
molecs->d_allocate(4,MNMN+1,molecs->PV);
molecs->i_allocate(MNMN+1,molecs->IPSP);
molecs->i_allocate(MNMN+1,molecs->IPCELL);
molecs->i_allocate(MNMN+1,molecs->ICREF);
molecs->i_allocate(MNMN+1,molecs->IPCP);
molecs->d_allocate(MNMN+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNMN),PTIM(MNMN),PV(3,MNMN),IPSP(MNMN),IPCELL(MNMN),ICREF(MNMN),IPCP(MNMN),PELE(MNMN),STAT=ERROR)
}
}
// IF (ERROR /= 0) THEN
// WRITE (*,*)'PROGRAM COULD NOT ALLOCATE SPACE FOR EXTEND_MNM',ERROR
// ! STOP
// END IF
// !
//memset(molecs->PX,0.0,sizeof(**molecs->PX)); memset(molecs->PTIM,0.0,sizeof(*molecs->PTIM)); memset(molecs->PV,0.0,sizeof(**molecs->PV)); memset(molecs->IPSP,0,sizeof(*molecs->IPSP)); memset(molecs->IPCELL,0,sizeof(*molecs->IPCELL)); memset(molecs->ICREF,0,sizeof(*molecs->ICREF)); memset(molecs->IPCP,0,sizeof(*molecs->IPCP)); memset(molecs->PELE,0,sizeof(*molecs->PELE));
for(int i=0;i<calc->NCLASS+1;i++){
for(int j=0;j<MNMN+1;j++)
molecs->PX[i][j]=0.0;
}
for(int i=0;i<4;i++){
for(int j=0;j<MNMN+1;j++)
molecs->PV[i][j]=0.0;
}
for(int i=0;i<MNMN+1;i++){
molecs->PTIM[i]=0.0;
molecs->IPSP[i]=0;
molecs->IPCELL[i]=0;
molecs->ICREF[i]=0;
molecs->IPCP[i]=0;
molecs->PELE[i]=0;
}
if(gas->MMRM > 0) {
for(int i=0;i<MNMN+1;i++)
molecs->PROT[i]=0.0;
//memset(molecs->PROT,0.0,sizeof(*molecs->PROT));
}
if(gas->MMVM > 0) {
for(int i=0;i<gas->MMVM+1;i++){
for(int j=0;j<MNMN+1;j++)
molecs->IPVIB[i][j]=0;
}
//memset(molecs->IPVIB,0,sizeof(**molecs->IPVIB));
}
//restore the original molecules
// OPEN (7,FILE='EXTMOLS.SCR',FORM='BINARY')
// WRITE (*,*) 'Start read back from disk storage'
file_7.open("EXTMOLS.SCR", ios::binary | ios::in);
if(file_7.is_open()){
cout<<"EXTMOLS.SCR is opened"<<endl;
}
else{
cout<<"EXTMOLS.SCR not opened"<<endl;
}
for(N=1;N<=molecs->MNM;N++){
if(gas->MMVM > 0){
file_7>>molecs->PX[calc->NCLASS][N]>>molecs->PTIM[N]>>molecs->PROT[N];
for(M=1;M<=3;M++)
file_7>>molecs->PV[M][N];
file_7>>molecs->IPSP[N]>>molecs->IPCELL[N]>>molecs->ICREF[N]>>molecs->IPCP[N];
for(M=1;M<=gas->MMVM;M++)
file_7>>molecs->IPVIB[M][N];
file_7>>molecs->PELE[N];//READ (7) PX(NCLASS,N),PTIM(N),PROT(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),(IPVIB(M,N),M=1,MMVM),PELE(N)
}
else{
if(gas->MMRM > 0){
file_7>>molecs->PX[calc->NCLASS][N]>>molecs->PTIM[N]>>molecs->PROT[N];
for(M=1;M<=3;M++)
file_7>>molecs->PV[M][N];
file_7>>molecs->IPSP[N]>>molecs->IPCELL[N]>>molecs->ICREF[N]>>molecs->IPCP[N]>>molecs->PELE[N];//READ (7) PX(NCLASS,N),PTIM(N),PROT(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),PELE(N)
}
else{
file_7>>molecs->PX[calc->NCLASS][N]>>molecs->PTIM[N];
for(M=1;M<=3;M++)
file_7>>molecs->PV[M][N];
file_7>>molecs->IPSP[N]>>molecs->IPCELL[N]>>molecs->ICREF[N]>>molecs->IPCP[N]>>molecs->PELE[N];//READ (7) PX(NCLASS,N),PTIM(N),(PV(M,N),M=1,3),IPSP(N),IPCELL(N),ICREF(N),IPCP(N),PELE(N)
}
}
}
cout<<"Disk read completed"<<endl;
// WRITE (*,*) 'Disk read completed'
// CLOSE (7,STATUS='DELETE')
file_7.close();
//
molecs->MNM=MNMN;
//
return;
}
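//
//--Editor's note: EXTEND_MNM above enlarges every molecule array by a factor (1.1 at its call
//--sites) by staging the data in EXTMOLS.SCR, re-allocating, and reading it back.  The helper
//--below is a minimal in-memory sketch of the same grow-by-factor idea for a single array,
//--assuming plain new/delete allocation; the name is illustrative and the function is not
//--called by the program.
static double* sketch_grow_double_array(double *old_a, int old_n, double factor, int &new_n)
{
    new_n=(int)(factor*(double)old_n); //new capacity, e.g. factor=1.1
    if(new_n <= old_n) new_n=old_n+1; //always grow by at least one slot
    double *a=new double[new_n];
    for(int i=0;i<new_n;i++) a[i]=0.e00; //initialise the extension
    for(int i=0;i<old_n;i++) a[i]=old_a[i]; //copy the existing entries
    delete [] old_a;
    return a;
}
//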
void DISSOCIATION()
{
//dissociate diatomic molecules that have been marked for dissociation by -ve level or -99999 for ground state
//MOLECS molecs;
//GAS gas;
//CALC calc;
//
// IMPLICIT NONE
//
int K,KK,L,N,M,LS,MS,KV,IDISS;
double A,B,C,EA,VRR,VR,RMM,RML;
double VRC[4],VCM[4],VRCP[4];
//
N=0;
while(N < molecs->NM){
N=N+1;
IDISS=0;
L=molecs->IPSP[N];
if(gas->ISPV[L] > 0){
for(K=1;K<=gas->ISPV[L];K++){
M=molecs->IPVIB[K][N];
if(M < 0){
//dissociation
calc->TDISS[L]=calc->TDISS[L]+1.e00;
IDISS=1;
}
}
if(IDISS == 1){
EA=molecs->PROT[N]; //EA is energy available for relative translational motion of atoms
if(gas->MELE > 1) EA=EA+molecs->PELE[N];
if(molecs->NM >= molecs->MNM) EXTEND_MNM(1.1);
molecs->NM=molecs->NM+1;
//set center of mass velocity as that of molecule
VCM[1]=molecs->PV[1][N];
VCM[2]=molecs->PV[2][N];
VCM[3]=molecs->PV[3][N];
molecs->PX[calc->NCLASS][molecs->NM]=molecs->PX[calc->NCLASS][N];
molecs->IPCELL[molecs->NM]=molecs->IPCELL[N];
LS=molecs->IPSP[N];
gas->TREACL[1][LS]=gas->TREACL[1][LS]-1;
molecs->IPSP[molecs->NM]=gas->ISPVM[1][1][L];
MS=molecs->IPSP[molecs->NM];
molecs->IPSP[N]=gas->ISPVM[2][1][L];
LS=molecs->IPSP[N];
gas->TREACG[1][LS]=gas->TREACG[1][LS]+1;
gas->TREACG[1][MS]=gas->TREACG[1][MS]+1;
molecs->PTIM[molecs->NM]=molecs->PTIM[N];
VRR=2.e00*EA/gas->SPM[1][LS][MS];
VR=sqrt(VRR);
RML=gas->SPM[1][LS][MS]/gas->SP[5][MS];
RMM=gas->SPM[1][LS][MS]/gas->SP[5][LS];
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
B=2.e00*calc->RANF-1.e00;
A=sqrt(1.e00-B*B);
VRCP[1]=B*VR;
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
C=2.e00*PI*calc->RANF;
VRCP[2]=A*cos(C)*VR;
VRCP[3]=A*sin(C)*VR;
for(KK=1;KK<=3;KK++){
molecs->PV[KK][N]=VCM[KK]+RMM*VRCP[KK];
molecs->PV[KK][molecs->NM]=VCM[KK]-RML*VRCP[KK];
}
if((fabs(molecs->PV[1][N]) > 100000.e00) || (fabs(molecs->PV[1][molecs->NM]) > 100000.e00)) {
cout<< "EXCESSIVE SPEED, DISS "<< N<< " "<<molecs->PV[1][N]<<" "<<molecs->NM<<" "<<molecs->PV[1][molecs->NM]<<endl;
}
//set any internal modes to the ground state
if(gas->ISPV[LS] > 0){
for(KV=1;KV<=gas->ISPV[LS];KV++)
molecs->IPVIB[KV][N]=0;
}
if(gas->ISPR[1][LS] > 0) molecs->PROT[N]=0.e00;
if(gas->MELE > 1) molecs->PELE[N]=0.e00;
if(gas->ISPV[MS] > 0){
for(KV=1;KV<=gas->ISPV[MS];KV++)
molecs->IPVIB[KV][molecs->NM]=0;
}
if(gas->ISPR[1][MS] > 0) molecs->PROT[molecs->NM]=0.0;
if(gas->MELE > 1) molecs->PELE[molecs->NM]=0.e00;
}
}
}
return;
}
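//
//--Editor's note: the post-dissociation velocities above are set by sampling an isotropic
//--direction for the relative velocity (cosine B=2*RANF-1, azimuth 2*PI*RANF) and splitting
//--the centre-of-mass velocity with the mass ratios RML and RMM.  The helper below is a
//--minimal standalone sketch of that isotropic direction sampling only, assuming the C
//--library rand(); the name is illustrative and the function is not called by the program.
static void sketch_isotropic_unit_vector(double &ux, double &uy, double &uz)
{
    double b=2.e00*((double)rand()/(double)RAND_MAX)-1.e00; //cosine of the polar angle, uniform in [-1,1]
    double a=sqrt(1.e00-b*b); //sine of the polar angle
    double c=2.e00*3.14159265358979324e00*((double)rand()/(double)RAND_MAX); //azimuth angle
    ux=b;
    uy=a*cos(c);
    uz=a*sin(c);
}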
//************************************************************************************
//
void ENERGY(int I,double &TOTEN)
{
//calculate the total energy (all molecules if I=0, otherwise molecule I)
//I>0 used for diagnostic purposes only
//MOLECS molecs;
//GAS gas;
//CALC calc;
//
// IMPLICIT NONE
//
int K,L,N,II,M,IV,KV,J;
double TOTENI,TOTELE;
//
TOTEN=0.0;
TOTELE=0;
//
if(I == 0){
for(N=1;N<=molecs->NM;N++){
if(molecs->IPCELL[N] > 0){
L=molecs->IPSP[N];
TOTENI=TOTEN;
TOTEN=TOTEN+gas->SP[6][L];
TOTEN=TOTEN+0.5e00*gas->SP[5][L]*(pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2)+pow(molecs->PV[3][N],2));
if(gas->ISPR[1][L] > 0) TOTEN=TOTEN+molecs->PROT[N];
if(gas->ISPV[L] > 0){
for(KV=1;KV<=gas->ISPV[L];KV++){
J=molecs->IPVIB[KV][N];
// IF (J <0) THEN
// J=-J
// IF (J == 99999) J=0
// END IF
TOTEN=TOTEN+double(J)*BOLTZ*gas->SPVM[1][KV][L];
}
}
}
if(gas->MELE > 1){
TOTEN=TOTEN+molecs->PELE[N];
TOTELE=TOTELE+molecs->PELE[N];
}
if((TOTEN-TOTENI) > 1.e-16) cout<<"MOL "<<N<<" ENERGY "<<TOTEN-TOTENI<<endl;
}
//
//WRITE (9,*) 'Total Energy =',TOTEN,NM
//WRITE (*,*) 'Total Energy =',TOTEN,NM
file_9<<"Total Energy = "<<setprecision(25)<<TOTEN<<"\t"<<molecs->NM<<endl;
cout<<"Total Energy = "<<setprecision(20)<<TOTEN<<"\t"<<molecs->NM<<endl;
// WRITE (*,*) 'Electronic Energy =',TOTELE
}
else{
N=I;
if(molecs->IPCELL[N] > 0){
L=molecs->IPSP[N];
TOTEN=TOTEN+gas->SP[6][L];
TOTEN=TOTEN+0.5e00*gas->SP[5][L]*(pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2)+pow(molecs->PV[3][N],2));
if(gas->ISPR[1][L] > 0) TOTEN=TOTEN+molecs->PROT[N];
if(gas->ISPV[L] > 0){
for(KV=1;KV<=gas->ISPV[L];KV++){
J=molecs->IPVIB[KV][N];
// IF (J <0) THEN
// J=-J
// IF (J == 99999) J=0
// END IF
TOTEN=TOTEN+double(J)*BOLTZ*gas->SPVM[1][KV][L];
}
}
}
}
//
return; //
}
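//
//--Editor's note: ENERGY above sums, per molecule, the formation energy SP(6,L), the kinetic
//--energy 0.5*m*|v|^2, the continuous rotational energy PROT, the harmonic oscillator
//--vibrational energy J*BOLTZ*SPVM(1,K,L) over the modes, and (for MELE>1) the electronic
//--energy PELE.  The helper below is a minimal sketch of that bookkeeping for one molecule
//--with a single vibrational mode; the name and argument list are illustrative and the
//--function is not called by the program.
static double sketch_molecule_energy(double mass, double u, double v, double w,
                                     double e_formation, double e_rot, double e_ele,
                                     int vib_level, double theta_v, double boltz)
{
    double e_tr=0.5e00*mass*(u*u+v*v+w*w); //translational (kinetic) energy
    double e_vib=(double)vib_level*boltz*theta_v; //harmonic oscillator level energy
    return e_formation+e_tr+e_rot+e_vib+e_ele; //total energy carried by the molecule
}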
void SETXT()
{
//generate TECPLOT files for displaying an x-t diagram of an unsteady flow
//this employs ordered data, therefore the cells MUST NOT BE ADAPTED
//N.B. some custom coding for particular problems
//
//
//MOLECS molecs;
//CALC calc;
//GEOM_1D geom;
//GAS gas;
//OUTPUT output;
//
// IMPLICIT NONE
//
int N,M,IOUT;
double A,C;
double **VALINT;
// REAL(KIND=8), ALLOCATABLE, DIMENSION(:,:) :: VALINT
//
//VALINT(N,M) the interpolated values at sampling cell M boundaries and extrapolated values at boundaries
// N=1 distance
// N=2 time
// N=3 number density
// N=4 radial velocity
// N=5 pressure (nkT)
// N=6 temperature
// N=7 h2o fraction (Sec. 7.9 only)
//
//the variables in VALINT may be altered for particular problems
//
VALINT = new double*[7];
for(int i =0; i< 7; ++i)
VALINT[i] = new double[geom->NCELLS+2];
// ALLOCATE (VALINT(6,NCELLS+1),STAT=ERROR)
//
//777 FORMAT(12G14.6)
//Internal options
IOUT=0; //0 for dimensioned output, 1 for non-dimensional output
//
A=1.e00; //dt/dt for selection of v velocity component in TECPLOT to draw particle paths as "streamlines"
//
if(calc->FTIME < 0.5e00*calc->DTM){
//Headings and zero time record
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR VALINT ARRAY',ERROR
// ENDIF
calc->NLINE=1;
file_9<< "J in tecplot file = "<<calc->NLINE*(geom->NCELLS+1)<<endl;
// WRITE (18,*) 'VARIABLES = "Distance","Time","n","u","p","T","H2O","A"' //for combustion wave output(Sec. 7.9)
file_18<<"VARIABLES = 'Distance','Time','n','u','p','T','A' "<<endl;
file_18<<"ZONE I= "<<geom->NCELLS+1<<", J= (set to number of output intervals+1), F=POINT"<<endl;
//
for(N=1;N<=geom->NCELLS+1;N++){
            VALINT[1][N]=geom->XB[1]+(N-1)*geom->DDIV; //distance
            VALINT[2][N]=0.0; //time
VALINT[3][N]=gas->FND[1];
VALINT[4][N]=0;
VALINT[5][N]=gas->FND[1]*BOLTZ*gas->FTMP[1];
VALINT[6][N]=gas->FTMP[1];
// VALINT(7,N)=FSP(6,1) //FSP(6 for combustion wave
if((VALINT[1][N] > geom->XS) && (calc->ISECS == 1)){
VALINT[3][N]=gas->FND[2];
VALINT[5][N]=gas->FND[2]*BOLTZ*gas->FTMP[2];
VALINT[6][N]=gas->FTMP[2];
// VALINT(7,N)=FSP(6,2)
}
if(IOUT == 1){
VALINT[3][N]=1.e00;
VALINT[5][N]=1.e00;
VALINT[6][N]=1.e00;
}
for(M=1;M<=6;M++)
file_18<<VALINT[M][N]<<"\t";//WRITE (18,777) (VALINT(M,N),M=1,6),A
file_18<<A<<endl;
}
}
else{
calc->NLINE=calc->NLINE+1;
cout<<"J in tecplot file = "<<calc->NLINE<<endl;
if(geom->IVB == 0) C=geom->DDIV;
if(geom->IVB == 1) C=(geom->XB[2]+geom->VELOB*calc->FTIME-geom->XB[1])/double(geom->NDIV);
for(N=1;N<=geom->NCELLS+1;N++){
VALINT[1][N]=geom->XB[1]+(N-1)*C;
VALINT[2][N]=calc->FTIME;
if((N > 1) && (N < geom->NCELLS+1)){
VALINT[3][N]=0.5e00*(output->VAR[3][N]+output->VAR[3][N-1]);
VALINT[4][N]=0.5e00*(output->VAR[5][N]+output->VAR[5][N-1]);
VALINT[5][N]=0.5e00*(output->VAR[18][N]+output->VAR[18][N-1]);
VALINT[6][N]=0.5e00*(output->VAR[11][N]+output->VAR[11][N-1]);
// VALINT(7,N)=0.5D00*(VARSP(1,N,6)+VARSP(1,N-1,6)) //H2O fraction for Sec 7.9
}
}
for(N=3;N<=6;N++)
VALINT[N][1]=0.5e00*(3.e00*VALINT[N][2]-VALINT[N][3]);
//
for(N=3;N<=6;N++)
VALINT[N][geom->NCELLS+1]=0.5e00*(3.e00*VALINT[N][geom->NCELLS]-VALINT[N][geom->NCELLS-1]);
//
for(N=1;N<=geom->NCELLS+1;N++){
if(IOUT == 1){
VALINT[1][N]=(VALINT[1][N]-geom->XB[1])/(geom->XB[2]-geom->XB[1]);
VALINT[2][N]=VALINT[2][N]/calc->TNORM;
VALINT[3][N]=VALINT[3][N]/gas->FND[1];
VALINT[4][N]=VALINT[4][N]/gas->VMPM;
VALINT[5][N]=VALINT[5][N]/(gas->FND[1]*BOLTZ*gas->FTMP[1]);
VALINT[6][N]=VALINT[6][N]/gas->FTMP[1];
}
for(M=1;M<=6;M++)
file_18<<VALINT[M][N]<<"\t";//WRITE (18,777) (VALINT[M][N],M=1,6),A //
file_18<<A<<endl;
}
}
//
return;
}
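//
//--Editor's note: the two loops over N=3,6 above fill the boundary values of VALINT by linear
//--extrapolation of the two nearest cell-midpoint values, i.e. v_b=0.5*(3*v1-v2) for a
//--boundary lying half an interval beyond v1.  The helper below is a minimal sketch of that
//--one-sided extrapolation; the name is illustrative and the function is not called by the
//--program.
static double sketch_extrapolate_to_boundary(double v1, double v2)
{
    //v1 is the value at the nearest interior midpoint, v2 at the next one in
    return 0.5e00*(3.e00*v1-v2);
}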
void MOLECULES_MOVE_1D()
{//
//molecule moves appropriate to the time step
//for homogeneous and one-dimensional flows
//(homogeneous flows are calculated as one-dimensional)
//MOLECS molecs;
//GAS gas;
//GEOM_1D geom;
//CALC calc;
//OUTPUT output;
//
// IMPLICIT NONE
//
int N,L,M,K,NCI,J,II,JJ;
double A,B,X,XI,XC,DX,DY,DZ,DTIM,S1,XM,R,TI,DTC,POB,UR,WFI,WFR,WFRI;
//
//N working integer
//NCI initial cell time
//DTIM time interval for the move
//POB position of the outer boundary
//TI initial time
//DTC time interval to collision with surface
//UR radial velocity component
//WFI initial weighting factor
//WFR weighting factor radius
//WFRI initial weighting factor radius
//
if((geom->ITYPE[2] == 4) && (calc->ICN == 1)){
//memset(calc->ALOSS,0.e00,sizeof(*calc->ALOSS));//calc->ALOSS=0.e00;
for(int i=0;i<gas->MSP+1;i++)
calc->ALOSS[i]=0.e00;
calc->NMP=molecs->NM;
}
//
N=1;
while(N <= molecs->NM){
//
NCI=molecs->IPCELL[N];
if((calc->IMTS == 0) || (calc->IMTS == 2)) DTIM=calc->DTM;
if(calc->IMTS == 1) DTIM=2.e00*geom->CCELL[3][NCI];
if(calc->FTIME-molecs->PTIM[N] > 0.5*DTIM){
WFI=1.e00;
if(geom->IWF == 1) WFI=1.e00+geom->WFM*pow(molecs->PX[1][N],geom->IFX);
II=0; //becomes 1 if a molecule is removed
TI=molecs->PTIM[N];
molecs->PTIM[N]=TI+DTIM;
calc->TOTMOV=calc->TOTMOV+1;
//
XI=molecs->PX[1][N];
DX=DTIM*molecs->PV[1][N];
X=XI+DX;
//
if(geom->IFX > 0){
DY=0.e00;
DZ=DTIM*molecs->PV[3][N];
if(geom->IFX == 2) DY=DTIM*molecs->PV[2][N];
R=sqrt(X*X+DY*DY+DZ*DZ);
}
//
if(geom->IFX == 0){
for(J=1;J<=2;J++){ // 1 for minimum x boundary, 2 for maximum x boundary
if(II == 0){
if(((J == 1) && (X < geom->XB[1])) || ((J == 2) && (X > (geom->XB[2]+geom->VELOB*molecs->PTIM[N])))){ //molecule crosses a boundary
if((geom->ITYPE[J] == 0) || (geom->ITYPE[J] == 3) || (geom->ITYPE[J] == 4)){
if(geom->XREM > geom->XB[1]){
L=molecs->IPSP[N];
calc->ENTMASS=calc->ENTMASS-gas->SP[5][L];
}
if((geom->ITYPE[2] == 4) && (calc->ICN == 1)){
L=molecs->IPSP[N];
calc->ALOSS[L]=calc->ALOSS[L]+1.e00;
}
REMOVE_MOL(N);
N=N-1;
II=1;
}
//
if(geom->ITYPE[J] == 1){
if((geom->IVB == 0) || (J == 1)){
X=2.e00*geom->XB[J]-X;
molecs->PV[1][N]=-molecs->PV[1][N];
}
else if((J == 2) && (geom->IVB == 1)){
DTC=(geom->XB[2]+TI*geom->VELOB-XI)/(molecs->PV[1][N]-geom->VELOB);
XC=XI+molecs->PV[1][N]*DTC;
molecs->PV[1][N]=-molecs->PV[1][N]+2.*geom->VELOB;
X=XC+molecs->PV[1][N]*(DTIM-DTC);
}
}
//
if(geom->ITYPE[J] == 2)
REFLECT_1D(N,J,X);
// END IF
}
}
}
}
else{ //cylindrical or spherical flow
//check boundaries
if((X <geom-> XB[1]) && (geom->XB[1] > 0.e00)){
RBC(XI,DX,DY,DZ,geom->XB[1],S1);
if(S1 < 1.e00){ //intersection with inner boundary
if(geom->ITYPE[1] == 2){//solid surface
DX=S1*DX;
DY=S1*DY;
DZ=S1*DZ;
AIFX(XI,DX,DY,DZ,X,molecs->PV[1][N],molecs->PV[2][N],molecs->PV[3][N]);
REFLECT_1D(N,1,X);
}
else{
REMOVE_MOL(N);
N=N-1;
II=1;
}
}
}
else if((geom->IVB == 0) && (R > geom->XB[2])){
RBC(XI,DX,DY,DZ,geom->XB[2],S1);
if(S1 < 1.e00){ //intersection with outer boundary
if(geom->ITYPE[2] == 2){ //solid surface
DX=S1*DX;
DY=S1*DY;
DZ=S1*DZ;
AIFX(XI,DX,DY,DZ,X,molecs->PV[1][N],molecs->PV[2][N],molecs->PV[3][N]);
X=1.001e00*geom->XB[2];
while(X > geom->XB[2])
REFLECT_1D(N,2,X);
// END DO
}
else{
REMOVE_MOL(N);
N=N-1;
II=1;
}
}
}
else if((geom->IVB == 1) && (R > (geom->XB[2]+molecs->PTIM[N]*geom->VELOB))){
if(geom->IFX == 1) UR=sqrt(pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2));
if(geom->IFX == 2) UR=sqrt(pow(molecs->PV[1][N],2)+pow(molecs->PV[2][N],2)+pow(molecs->PV[3][N],2));
DTC=(geom->XB[2]+TI*geom->VELOB-XI)/(UR-geom->VELOB);
S1=DTC/DTIM;
DX=S1*DX;
DY=S1*DY;
DZ=S1*DZ;
AIFX(XI,DX,DY,DZ,X,molecs->PV[1][N],molecs->PV[2][N],molecs->PV[3][N]);
molecs->PV[1][N]=-molecs->PV[1][N]+2.0*geom->VELOB;
X=X+molecs->PV[1][N]*(DTIM-DTC);
}
else
AIFX(XI,DX,DY,DZ,X,molecs->PV[1][N],molecs->PV[2][N],molecs->PV[3][N]);
//DIAGNOSTIC
if(II == 0){
if(X > geom->XB[2]+molecs->PTIM[N]*geom->VELOB){
//WRITE (*,*) N,calc->FTIME,X,geom->XB[2]+molecs->PTIM[N]*geom->VELOB;
cout<<N<<" "<<calc->FTIME<<" "<<X<<" "<<(geom->XB[2]+molecs->PTIM[N]*geom->VELOB)<<endl;
}
}
//Take action on weighting factors
if((geom->IWF == 1) && (II == 0)){
WFR=WFI/(1.e00+geom->WFM*pow(X,geom->IFX));
L=0;
WFRI=WFR;
if(WFR >= 1.e00){
while(WFR >= 1.e00){
L=L+1;
WFR=WFR-1.e00;
}
}
// CALL RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF <= WFR) L=L+1;
if(L == 0){
REMOVE_MOL(N);
N=N-1;
II=1;
}
L=L-1;
if(L > 0){
for(K=1;K<=L;K++){
if(molecs->NM >= molecs->MNM) EXTEND_MNM(1.1);
molecs->NM=molecs->NM+1;
molecs->PX[1][molecs->NM]=X;
for(M=1;M<=3;M++)
molecs->PV[M][molecs->NM]=molecs->PV[M][N];
if(gas->MMRM > 0) molecs->PROT[molecs->NM]=molecs->PROT[N];
molecs->IPCELL[molecs->NM]=fabs(molecs->IPCELL[N]);
molecs->IPSP[molecs->NM]=molecs->IPSP[N];
molecs->IPCP[molecs->NM]=molecs->IPCP[N];
if(gas->MMVM > 0){
for(M=1;M<=gas->MMVM;M++)
molecs->IPVIB[M][molecs->NM]=molecs->IPVIB[M][N];
}
molecs->PTIM[molecs->NM]=molecs->PTIM[N]; //+5.D00*DFLOAT(K)*DTM
//note the possibility of a variable time advance that may take the place of the duplication buffer in earlier programs
if(molecs->PX[1][molecs->NM] > geom->XB[2]+molecs->PTIM[molecs->NM]*geom->VELOB)
//WRITE (*,*) 'DUP',NM,FTIME,PX(1,NM),XB(2)+PTIM(NM)*VELOB
cout<<"DUP "<<molecs->NM<<" "<<calc->FTIME<<" "<<molecs->PX[1][molecs->NM]<<" "<<(geom->XB[2]+molecs->PTIM[molecs->NM]*geom->VELOB)<<endl;
}
}
}
}
//
        if(II == 0){
            molecs->PX[1][N]=X;
            //the Fortran CONTINUE at this point is a no-op; a C++ continue would skip the cell search below
            if(!(molecs->PX[1][N] > geom->XB[1] && molecs->PX[1][N] < geom->XB[2])){
                cout<< N<<" OUTSIDE FLOWFIELD AT "<<molecs->PX[1][N]<<" VEL "<<molecs->PV[1][N]<<endl;
                REMOVE_MOL(N);
                N=N-1;
                II=1;
            }
        }
//
if(II == 0){
if(geom->IVB == 0) FIND_CELL_1D(molecs->PX[1][N],molecs->IPCELL[N],JJ);
if(geom->IVB == 1) FIND_CELL_MB_1D(molecs->PX[1][N],molecs->IPCELL[N],JJ,molecs->PTIM[N]);
}
//
}
//
N=N+1;
}
//
return;
}
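//
//--Editor's note: for a specular reflection from the moving outer boundary (ITYPE(2)=1 with
//--IVB=1) the move above finds the time DTC at which the molecule meets a boundary travelling
//--at VELOB, reverses the velocity in the boundary frame (u'=-u+2*VELOB), and completes the
//--remainder of the time step.  The helper below is a minimal sketch of that calculation for
//--a plane boundary starting at xb and a molecule at xi with velocity u at time ti moving for
//--an interval dtim; it assumes u>velob so that an impact occurs, the names are illustrative
//--and the function is not called by the program.
static void sketch_reflect_from_moving_wall(double xi, double u, double ti, double dtim,
                                            double xb, double velob, double &x_new, double &u_new)
{
    double dtc=(xb+ti*velob-xi)/(u-velob); //time from ti to impact with the moving boundary
    double xc=xi+u*dtc; //position of the impact
    u_new=-u+2.e00*velob; //specular reflection in the frame of the moving wall
    x_new=xc+u_new*(dtim-dtc); //complete the remainder of the time step
}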
void READ_RESTART()
{
//MOLECS molecs;
//GEOM_1D geom;
//GAS gas;
//CALC calc;
//OUTPUT output;
// IMPLICIT NONE
//
fstream file_7;
int ZCHECK;
//
// 101 CONTINUE
_101:
file_7.open("PARAMETERS.DAT", ios::in | ios::binary);
if(file_7.is_open()){
cout<<"PARAMETERS.DAT opened successfully"<<endl;
file_7>>geom->NCCELLS>>geom->NCELLS>>gas->MMRM>>gas->MMVM>>molecs->MNM>>gas->MNSR>>gas->MSP>>geom->ILEVEL>>geom->MDIV>>gas->MMEX>>gas->MEX>>gas->MELE>>gas->MVIBL>>calc->NCLASS;
file_7.close();
}
else{
cout<<"PARAMETERS.DAT not opening"<<endl;
goto _101;
}
//cout<<geom->NCCELLS<<endl<<geom->NCELLS<<endl<<gas->MMRM<<endl<<gas->MMVM<<endl<<molecs->MNM<<endl;
// OPEN (7,FILE='PARAMETERS.DAT',FORM='BINARY',ERR=101)
// READ (7) NCCELLS,NCELLS,MMRM,MMVM,MNM,MNSR,MSP,ILEVEL,MDIV,MMEX,MEX,MELE,MVIBL,NCLASS
// CLOSE(7)
//
if(gas->MMVM > 0){
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->d_allocate(molecs->MNM+1,molecs->PROT);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->i_allocate(gas->MMVM+1,molecs->MNM+1,molecs->IPVIB);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),PROT(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM), &
// IPVIB(MMVM,MNM),PELE(MNM),STAT=ERROR)
}
else{
if(gas->MMRM > 0){
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->d_allocate(molecs->MNM+1,molecs->PROT);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),PROT(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM),PELE(MNM),STAT=ERROR)
}
else{
molecs->d_allocate(calc->NCLASS+1,molecs->MNM+1,molecs->PX);
molecs->d_allocate(molecs->MNM+1,molecs->PTIM);
molecs->i_allocate(molecs->MNM+1,molecs->IPCELL);
molecs->i_allocate(molecs->MNM+1,molecs->IPSP);
molecs->i_allocate(molecs->MNM+1,molecs->ICREF);
molecs->i_allocate(molecs->MNM+1,molecs->IPCP);
molecs->d_allocate(4,molecs->MNM+1,molecs->PV);
molecs->d_allocate(molecs->MNM+1,molecs->PELE);
// ALLOCATE (PX(NCLASS,MNM),PTIM(MNM),IPCELL(MNM),IPSP(MNM),ICREF(MNM),IPCP(MNM),PV(3,MNM),PELE(MNM),STAT=ERROR)
}
}
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR MOLECULE ARRAYS',ERROR
// ENDIF
//
geom->i_allocate(geom->ILEVEL+1,geom->MDIV+1,geom->JDIV);
// ALLOCATE (JDIV(0:ILEVEL,MDIV),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR JDIV ARRAY',ERROR
// ENDIF
geom->d_allocate(5,geom->NCELLS+1,geom->CELL);
geom->i_allocate(geom->NCELLS+1,geom->ICELL);
geom->d_allocate(6,geom->NCCELLS+1,geom->CCELL);
geom->i_allocate(4,geom->NCCELLS+1,geom->ICCELL);
// ALLOCATE (CELL(4,NCELLS),ICELL(NCELLS),CCELL(5,NCCELLS),ICCELL(3,NCCELLS),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR CELL ARRAYS',ERROR
// ENDIF
output->d_allocate(geom->NCELLS+1,output->COLLS);
output->d_allocate(geom->NCELLS+1,output->WCOLLS);
output->d_allocate(geom->NCELLS+1,output->CLSEP);
output->d_allocate(gas->MNSR+1,output->SREAC);
output->d_allocate(24,geom->NCELLS+1,output->VAR);
output->d_allocate(13,geom->NCELLS+1,gas->MSP+1,output->VARSP);
output->d_allocate(36+gas->MSP,3,output->VARS);
output->d_allocate(10+gas->MSP,geom->NCELLS+1,gas->MSP+1,output->CS);
output->d_allocate(9,3,gas->MSP+1,3,output->CSS);
output->d_allocate(7,3,output->CSSS);
// ALLOCATE (COLLS(NCELLS),WCOLLS(NCELLS),CLSEP(NCELLS),SREAC(MNSR),VAR(23,NCELLS), &
// VARSP(0:12,NCELLS,MSP),VARS(0:35+MSP,2),CS(0:9+MSP,NCELLS,MSP),CSS(0:8,2,MSP,2),CSSS(6,2),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR SAMPLING ARRAYS',ERROR
// ENDIF
//
if(gas->MMVM >= 0){
output->d_allocate(gas->MSP+1,gas->MMVM+1,151,output->VIBFRAC);
output->d_allocate(gas->MSP+1,gas->MMVM+1,output->SUMVIB);
// ALLOCATE (VIBFRAC(MSP,MMVM,0:150),SUMVIB(MSP,MMVM),STAT=ERROR)
// IF (ERROR /= 0) THEN
// WRITE (*,*) 'PROGRAM COULD NOT ALLOCATE SPACE FOR RECOMBINATION ARRAYS',ERROR
// ENDIF
}
//
ALLOCATE_GAS();
//
//102 CONTINU
_102:
file_7.open("RESTART.DAT", ios::in | ios::binary);
if(file_7.is_open()){
cout<<"RESTART.DAT opened successfully"<<endl;
/*file_7>>calc->AJM>>calc->ALOSS>>output->AVDTM>>BOLTZ>>geom->CCELL>>geom->CELL>>output->CLSEP>>output->COLLS>>calc->CPDTM>>gas->CR>>output->CS>>output->CSS>>output->CSSS>>gas->CTM>>gas->CXSS>>geom->DDIV>>DPI>>calc->DTM>>calc->DTSAMP>>calc->DTOUT>>calc->EME>>calc->ENTMASS>>gas->ENTR>>calc->ENTREM>>calc->ERROR>>gas->ERS>>gas->FDEN>>gas->FMA>>gas->FND>>calc->FNUM>>calc->FRACSAM>>gas->FSP>>gas->FP>>gas->FPM>>gas->FPR>>geom->FREM>>gas->FSPEC>>gas->FTMP>>calc->FTIME>>gas->FVTMP>>geom->ICCELL>>geom->ICELL>>calc->ICLASS>>calc->ICN>>molecs->ICREF>>geom->IFX>>gas->IGAS>>calc->IMTS>>molecs->IPCELL>>molecs->IPCP>>molecs->IPSP>>molecs->IPVIB>>calc->IREM>>calc->ISAD>>calc->ISECS>>calc->ISF>>gas->ISPEX>>gas->ISPR>>gas->ISPRC>>gas->ISPRK>>gas->ISPV>>gas->ISPVM>>gas->ISRCD>>geom->ITYPE>>geom->IVB>>geom->IWF>>geom->JDIV>>gas->LIS>>gas->LRS>>calc->MOLSC>>calc->MVER>>geom->NCCELLS>>geom->NCELLS>>geom->NCIS>>geom->NDIV>>gas->NELL>>gas->NEX>>calc->NLINE>>molecs->NM>>output->NMISAMP>>calc->NNC>>output->NOUT>>output->NSAMP>>gas->NSLEV>>gas->NSPEX>>calc->NREL>>calc->NVER>>molecs->PELE>>PI>>molecs->PROT>>molecs->PTIM>>molecs->PV>>molecs->PX>>gas->QELC>>gas->RGFS>>gas->RMAS>>gas->SLER>>gas->SP>>gas->SPEX>>SPI>>gas->SPM>>gas->SPR>>gas->SPRC>>gas->SPREX>>gas->SPRP>>gas->SPRT>>gas->SPV>>gas->SPVM>>output->SREAC>>output->SUMVIB>>calc->TCOL>>calc->TDISS>>calc->TRECOMB>>output->TISAMP>>calc->TPOUT>>calc->TREF>>calc->TLIM>>calc->TOTCOL>>calc->TOTMOV>>gas->TREACG>>gas->TREACL>>calc->TOUT>>calc->TPDTM>>calc->TREF>>calc->TSAMP>>gas->TSURF>>output->VAR>>output->VARS>>output->VARSP>>geom->VELOB>>gas->VFX>>gas->VFY>>output->VIBFRAC>>gas->VMP>>gas->VMPM>>calc->VNMAX>>gas->VSURF>>output->WCOLLS>>geom->WFM>>geom->XB>>geom->XREM>>output->XVELS>>output->YVELS>>gas->TNEX>>ZCHECK>>endl;*/
file_7.read((char*)&calc,sizeof(calc));
file_7.read((char*)&molecs,sizeof(molecs));
file_7.read((char*)&gas,sizeof(gas));
file_7.read((char*)&geom,sizeof(geom));
file_7.read((char*)&output,sizeof(output));
file_7.close();
}
else{
cout<<"Restart.DAT not opening"<<endl;
goto _102;
}
// OPEN (7,FILE='RESTART.DAT',FORM='BINARY',ERR=102)
// READ (7) AJM,ALOSS,AVDTM,BOLTZ,CCELL,CELL,CLSEP,COLLS, &
// CPDTM,CR,CS,CSS,CSSS,CTM,CXSS,DDIV,DPI,DTM,DTSAMP,DTOUT,EME, &
// ENTMASS,ENTR,ENTREM,ERROR,ERS,FDEN,FMA,FND,FNUM,FRACSAM,FSP,FP,FPM,FPR,FREM,FSPEC, &
// FTMP,FTIME,FVTMP,ICCELL,ICELL,ICLASS,ICN,ICREF,IFX,IGAS,IMTS,IPCELL,IPCP, &
// IPSP,IPVIB,IREM,ISAD,ISECS,ISF,ISPEX,ISPR,ISPRC,ISPRK,ISPV,ISPVM,ISRCD,ITYPE,IVB,IWF, &
// JDIV,LIS,LRS,MOLSC,MVER,NCCELLS,NCELLS, &
// NCIS,NDIV,NELL,NEX,NLINE,NM,NMISAMP,NNC,NOUT,NSAMP,NSLEV,NSPEX,NREL,NVER,PELE,PI,PROT,PTIM,PV,PX, &
// QELC,RGFS,RMAS,SLER,SP,SPEX,SPI,SPM,SPR,SPRC,SPREX,SPRP,SPRT,SPV,SPVM,SREAC,SUMVIB, &
// TCOL,TDISS,TRECOMB,TISAMP,TPOUT,TREF,TLIM,TOTCOL,TOTMOV, &
// TREACG,TREACL,TOUT,TPDTM,TREF,TSAMP,TSURF,VAR,VARS,VARSP,VELOB,VFX,VFY,VIBFRAC,VMP, &
// VMPM,VNMAX,VSURF,WCOLLS,WFM,XB,XREM,XVELS,YVELS,TNEX,ZCHECK
// //
// CLOSE(7)
//
if(ZCHECK != 1234567){
file_9<<molecs->NM<<" Molecules, Check integer = "<<ZCHECK<<endl;
//WRITE (9,*) NM,' Molecules, Check integer =',ZCHECK
return ;
}
else
file_9<<"Restart file read, Check integer= "<<ZCHECK<<endl;
//WRITE (9,*) 'Restart file read, Check integer=',ZCHECK
//
return;
//
}
//*****************************************************************************
void WRITE_RESTART()
{
//MOLECS molecs;
//GEOM_1D geom;
//GAS gas;
//CALC calc;
//OUTPUT output;
// IMPLICIT NONE
//
int ZCHECK;
//
fstream file_7;
ZCHECK=1234567;
//
//101 CONTINUE
_101:
file_7.open("PARAMETERS.DAT", ios::out | ios::binary);
if(file_7.is_open()){
file_7<<geom->NCCELLS<<endl<<geom->NCELLS<<endl<<gas->MMRM<<endl<<gas->MMVM<<endl<<molecs->MNM<<endl<<gas->MNSR<<endl<<gas->MSP<<endl<<geom->ILEVEL<<endl<<geom->MDIV<<endl<<gas->MMEX<<endl<<gas->MEX<<endl<<gas->MELE<<endl<<gas->MVIBL<<endl<<calc->NCLASS<<endl;
file_7.close();
}
else{
cout<<"Parameters.DAT file not opening(write)"<<endl;
goto _101;
}
// OPEN (7,FILE='PARAMETERS.DAT',FORM='BINARY',ERR=101)
// WRITE (7) NCCELLS,NCELLS,MMRM,MMVM,MNM,MNSR,MSP,ILEVEL,MDIV,MMEX,MEX,MELE,MVIBL,NCLASS
// CLOSE(7)
//
// 102 CONTINUE
_102:
file_7.open("RESTART.DAT", ios::out | ios::binary);
if(file_7.is_open()){
/*file_7<<calc->AJM<<calc->ALOSS<<output->AVDTM<<BOLTZ<<geom->CCELL<<geom->CELL<<output->CLSEP<<output->COLLS<<calc->CPDTM<<gas->CR<<output->CS<<output->CSS<<output->CSSS<<gas->CTM<<gas->CXSS<<geom->DDIV<<DPI<<calc->DTM<<calc->DTSAMP<<calc->DTOUT<<calc->EME<<calc->ENTMASS<<gas->ENTR<<calc->ENTREM<<calc->ERROR<<gas->ERS<<gas->FDEN<<gas->FMA<<gas->FND<<calc->FNUM<<calc->FRACSAM<<gas->FSP<<gas->FP<<gas->FPM<<gas->FPR<<geom->FREM<<gas->FSPEC<<gas->FTMP<<calc->FTIME<<gas->FVTMP<<geom->ICCELL<<geom->ICELL<<calc->ICLASS<<calc->ICN<<molecs->ICREF<<geom->IFX<<gas->IGAS<<calc->IMTS<<molecs->IPCELL<<molecs->IPCP<<molecs->IPSP<<molecs->IPVIB<<calc->IREM<<calc->ISAD<<calc->ISECS<<calc->ISF<<gas->ISPEX<<gas->ISPR<<gas->ISPRC<<gas->ISPRK<<gas->ISPV<<gas->ISPVM<<gas->ISRCD<<geom->ITYPE<<geom->IVB<<geom->IWF<<geom->JDIV<<gas->LIS<<gas->LRS<<calc->MOLSC<<calc->MVER<<geom->NCCELLS<<geom->NCELLS<<geom->NCIS<<geom->NDIV<<gas->NELL<<gas->NEX<<calc->NLINE<<molecs->NM<<output->NMISAMP<<calc->NNC<<output->NOUT<<output->NSAMP<<gas->NSLEV<<gas->NSPEX<<calc->NREL<<calc->NVER<<molecs->PELE<<PI<<molecs->PROT<<molecs->PTIM<<molecs->PV<<molecs->PX<<gas->QELC<<gas->RGFS<<gas->RMAS<<gas->SLER<<gas->SP<<gas->SPEX<<SPI<<gas->SPM<<gas->SPR<<gas->SPRC<<gas->SPREX<<gas->SPRP<<gas->SPRT<<gas->SPV<<gas->SPVM<<output->SREAC<<output->SUMVIB<<calc->TCOL<<calc->TDISS<<calc->TRECOMB<<output->TISAMP<<calc->TPOUT<<calc->TREF<<calc->TLIM<<calc->TOTCOL<<calc->TOTMOV<<gas->TREACG<<gas->TREACL<<calc->TOUT<<calc->TPDTM<<calc->TREF<<calc->TSAMP<<gas->TSURF<<output->VAR<<output->VARS<<output->VARSP<<geom->VELOB<<gas->VFX<<gas->VFY<<output->VIBFRAC<<gas->VMP<<gas->VMPM<<calc->VNMAX<<gas->VSURF<<output->WCOLLS<<geom->WFM<<geom->XB<<geom->XREM<<output->XVELS<<output->YVELS<<gas->TNEX<<ZCHECK<<endl;*/
file_7.write((char*)&calc,sizeof(calc));
file_7.write((char*)&molecs,sizeof(molecs));
file_7.write((char*)&gas,sizeof(gas));
file_7.write((char*)&geom,sizeof(geom));
file_7.write((char*)&output,sizeof(output));
file_7.close();
}
else{
cout<<"Restart.DAT file not opening(write)"<<endl;
        goto _102;
}
// OPEN (7,FILE='RESTART.DAT',FORM='BINARY',ERR=102)
// WRITE (7)AJM,ALOSS,AVDTM,BOLTZ,CCELL,CELL,CLSEP,COLLS, &
// CPDTM,CR,CS,CSS,CSSS,CTM,CXSS,DDIV,DPI,DTM,DTSAMP,DTOUT,EME, &
// ENTMASS,ENTR,ENTREM,ERROR,ERS,FDEN,FMA,FND,FNUM,FRACSAM,FSP,FP,FPM,FPR,FREM,FSPEC, &
// FTMP,FTIME,FVTMP,ICCELL,ICELL,ICLASS,ICN,ICREF,IFX,IGAS,IMTS,IPCELL,IPCP, &
// IPSP,IPVIB,IREM,ISAD,ISECS,ISF,ISPEX,ISPR,ISPRC,ISPRK,ISPV,ISPVM,ISRCD,ITYPE,IVB,IWF, &
// JDIV,LIS,LRS,MOLSC,MVER,NCCELLS,NCELLS, &
// NCIS,NDIV,NELL,NEX,NLINE,NM,NMISAMP,NNC,NOUT,NSAMP,NSLEV,NSPEX,NREL,NVER,PELE,PI,PROT,PTIM,PV,PX, &
// QELC,RGFS,RMAS,SLER,SP,SPEX,SPI,SPM,SPR,SPRC,SPREX,SPRP,SPRT,SPV,SPVM,SREAC,SUMVIB, &
// TCOL,TDISS,TRECOMB,TISAMP,TPOUT,TREF,TLIM,TOTCOL,TOTMOV, &
// TREACG,TREACL,TOUT,TPDTM,TREF,TSAMP,TSURF,VAR,VARS,VARSP,VELOB,VFX,VFY,VIBFRAC,VMP, &
// VMPM,VNMAX,VSURF,WCOLLS,WFM,XB,XREM,XVELS,YVELS,TNEX,ZCHECK
// //
// CLOSE(7)
//
file_9<<"Restart files written"<<endl;
//WRITE (9,*) 'Restart files written'
//
return;
}
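//
//--Editor's note: the raw file_7.write((char*)&calc,sizeof(calc)) calls above, and the
//--matching reads in READ_RESTART, store only the bytes of the handles themselves, not the
//--dynamically allocated arrays they point to (nor the ZCHECK integer listed in the commented
//--Fortran WRITE), so the restart files as written do not yet reproduce the full Fortran
//--restart record.  The helper below is a minimal sketch of element-wise binary output for
//--one dynamically allocated array, assuming an fstream already opened with ios::binary; the
//--name is illustrative and the function is not called by the program.
static void sketch_write_double_array(fstream &f, const double *a, int n)
{
    f.write((const char*)&n,sizeof(n)); //store the length first
    f.write((const char*)a,(long)n*(long)sizeof(double)); //then the contiguous element data
}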
void OUTPUT_RESULTS()
{
//--calculate the surface and flowfield properties
//--generate TECPLOT files for displaying these properties
//--calculate collision rates and flow transit times and reset time intervals
//--add molecules to any flow plane molecule output files
//CALC calc;
//MOLECS molecs;
//GAS gas;
//OUTPUT output;
//GEOM_1D geom;
fstream file_3;
fstream file_10;
fstream file_7;
int IJ,J,JJ,K,L,LL,M,N,NN,NMCR,CTIME,II;
long long NNN;
double AS,AT,C1,C2,C3,C4,C5,C6,C7,C8,C9;
double A,B,C,SDTM,SMCR,DOF,AVW,UU,VDOFM,TVIBM,VEL,DTMI,TT;
//dout
double SUM[14];
double SUMS[10][3];
double *TVIB,*VDOF,*PPA,*TEL,*ELDOF,*SDOF,*CDTM;
double **TV,**THCOL;
double ***DF;
int *NMS;
// REAL(KIND=8), ALLOCATABLE, DIMENSION(:) :: TVIB,VDOF,PPA,TEL,ELDOF,SDOF,CDTM
// REAL(KIND=8), ALLOCATABLE, DIMENSION(:,:) :: TV,THCOL
// REAL(KIND=8), ALLOCATABLE, DIMENSION(:,:,:) :: DF
// INTEGER, ALLOCATABLE, DIMENSION(:) :: NMS
//INTEGER, ALLOCATABLE, DIMENSION(:,:) ::
string F,E;
//--CTIME computer time (microseconds)
//--SUMS(N,L) sum over species of CSS(N,J,L,M) for surface properties
//
//--For flowfield properties,where <> indicates sampled sum
//--SUM(0) the molecular number sum over all species
//--SUM(1) the weighted number sum over all species
//--SUM(2) the weighted sum of molecular masses
//--SUM(3),(4),(5) the weighted sum over species of m*<u>,<v>,<w>
//--SUM(6) the weighted sum over species of m*(<u**2>+<v**2>+<w**2>)
//--SUM(7) the weighted sum over species of <u**2>+<v**2>+<w**2>
//--SUM(8) the weighted sum of rotational energy
//--SUM(9) the weighted sum of rotational degrees of freedom
//--SUM(10) the weighted sum over species of m*<u**2>
//--SUM(11) the weighted sum over species of m*<v**2>
//--SUM(12) sum over species of m*<w**2>
//--SUM(13) the weighted sum of electronic energy
//--UU velocity squared
//--DOF degrees of freedom
//--AVW the average value of the viscosity-temperature exponent
//--DVEL velocity difference
//--TVEL thermal speed
//--SMCR sum of mcs/mfp over cells
//--NMCR number in the sum
//--VDOFM effective vibrational degrees of freedom of mixture
//--TVIB(L)
//--VDOF(L)
//--TV(K,L) the temperature of vibrational mode K of species L
//--PPA particles per atom
//--NMS number per species
//--SDOF(L) total degrees of freedom for species L
//
//
//--calculate the flowfield properties in the cells
//dout
TV = new double*[gas->MMVM+1];
for(int i =0; i< gas->MMVM+1; ++i)
TV[i] = new double[gas->MSP+1];
TVIB = new double[gas->MSP+1];
DF = new double **[geom->NCELLS+1];
for (int i = 0; i < geom->NCELLS+1; ++i)
{
DF[i] = new double *[gas->MMVM+1];
for (int j = 0; j < gas->MMVM+1; ++j)
DF[i][j] = new double [gas->MSP+1];
}
VDOF= new double[gas->MSP+1];
TEL = new double[gas->MSP+1];
ELDOF = new double[gas->MSP+1];
PPA = new double[gas->MSP+1];
NMS = new int[gas->MSP+1];
THCOL = new double*[gas->MSP+1];
for(int i =0; i< gas->MSP+1; ++i)
THCOL[i] = new double[gas->MSP+1];
SDOF = new double[gas->MSP+1];
CDTM = new double[geom->NCELLS+1];
// ALLOCATE (TV(MMVM,MSP),TVIB(MSP),DF(NCELLS,MMVM,MSP),VDOF(MSP),TEL(MSP),ELDOF(MSP),PPA(MSP),NMS(MSP),THCOL(MSP,MSP) &
// ,SDOF(MSP),CDTM(NCELLS),STAT=ERROR)
// if(calc->ERROR!=0)
// {
// cout<<"ROGRAM COULD NOT ALLOCATE OUTPUT VARIABLES"<<calc->ERROR<<endl;
// }
if(calc->FTIME>0.5e00*calc->DTM)
{
output->NOUT+=1;
if(output->NOUT>9999)
output->NOUT=output->NOUT-9999;
cout<<"Generating files for output interval"<<output->NOUT<<endl;
if(calc->ISF==0)
{
//dout
//OPEN (3,FILE='DS1OUT.DAT')
file_3.open("DS1OUT.DAT" , ios::out);
if(file_3.is_open()){
cout<<"DS1OUT.DAT is opened"<<endl;
}
else{
cout<<"DS1OUT.DAT not opened"<<endl;
}
//F='DS';//E//'.OUT'
}
else
{
//--the files are DS1n.DAT, where n is a four digit integer equal to NOUT
//dout
//500 FORMAT(I5)
//ENCODE(5,500,E) 10000+NOUT
int a=output->NOUT+10000;
E=to_string(a);
F="DS" + E + "OUT.DAT";
//dout
            file_3.open(F.c_str(), ios::out);
if(file_3.is_open()){
cout<<F<<" is opened"<<endl;
}
else{
cout<<F<<" not opened"<<endl;
}
//OPEN (3,FILE=F)
}
}
//dout
//memset(output->VAR,0.e00,sizeof(**output->VAR));
for(int i=0;i<24;i++){
for(int j=0;j<geom->NCELLS+1;j++)
output->VAR[i][j]=0.e00;
}
if(geom->IFX==0)
A=calc->FNUM/(calc->FTIME-output->TISAMP);
for(JJ=1;JJ<=2;JJ++)
{
        if(geom->IFX==1)
            A=calc->FNUM/(2.e00*PI*geom->XB[JJ]*(calc->FTIME-output->TISAMP));
        if(geom->IFX==2)
            A=calc->FNUM/(4.e00*PI*geom->XB[JJ]*geom->XB[JJ]*(calc->FTIME-output->TISAMP));
//--JJ=1 for surface at XB(1), JJ=2 for surface at XB(2)
if(geom->ITYPE[JJ]==2)
{
//dout
//memset(SUMS,0.e00,sizeof(SUMS));
for(int i=0;i<10;i++){
for(int j=0;j<3;j++)
SUMS[i][j]=0.e00;
}
for( L=1;L<=gas->MSP;L++)
{
for(J=0;J<=8;J++)
{
for(IJ=1;IJ<=2;IJ++)
{
SUMS[J][IJ]=SUMS[J][IJ]+output->CSS[J][JJ][L][IJ];
}
}
}
output->VARS[0][JJ]=SUMS[0][1];
output->VARS[1][JJ]=SUMS[1][1];
output->VARS[2][JJ]=SUMS[1][2];
output->VARS[3][JJ]=SUMS[1][1]*A;
output->VARS[4][JJ]=SUMS[1][2]*A;
output->VARS[5][JJ]=SUMS[2][1]*A;
output->VARS[6][JJ]=SUMS[2][2]*A;
output->VARS[7][JJ]=SUMS[3][1]*A;
output->VARS[8][JJ]=SUMS[3][2]*A;
output->VARS[9][JJ]=SUMS[4][1]*A;
output->VARS[10][JJ]=SUMS[4][2]*A;
output->VARS[11][JJ]=SUMS[5][1]*A;
output->VARS[12][JJ]=SUMS[5][2]*A;
output->VARS[13][JJ]=SUMS[6][1]*A;
output->VARS[14][JJ]=SUMS[6][2]*A;
output->VARS[15][JJ]=SUMS[7][1]*A;
output->VARS[16][JJ]=SUMS[7][2]*A;
output->VARS[33][JJ]=SUMS[8][1]*A;
output->VARS[34][JJ]=SUMS[8][2]*A;
// VARS(17,JJ)=SUMS(9,1)*A //--SURFACE REACTIONS NOT YET IMPLEMENTED
// VARS(18,JJ)=SUMS(9,2)*A
if(output->CSSS[1][JJ]>1.e-6)
{
output->VARS[19][JJ]=output->CSSS[3][JJ]/output->CSSS[2][JJ]; ////--n.b. must be modified to include second component in 3D
output->VARS[20][JJ]=(output->CSSS[4][JJ]-output->CSSS[2][JJ]*output->VARS[19][JJ]*output->VARS[19][JJ])/(output->CSSS[1][JJ]*3.e00*BOLTZ)-gas->TSURF[JJ];
output->VARS[19][JJ]=output->VARS[19][JJ]-gas->VSURF[JJ];
if(output->CSSS[6][JJ]>1.e-6)
{
output->VARS[21][JJ]=(2.e000/BOLTZ)*(output->CSSS[5][JJ]/output->CSSS[6][JJ])-gas->TSURF[JJ];
}
else
{
output->VARS[21][JJ]=0.e00;
}
}
else
{
output->VARS[19][JJ]=0.e00;
output->VARS[20][JJ]=0.e00;
output->VARS[21][JJ]=0.e00;
}
output->VARS[22][JJ]=(SUMS[2][1]+SUMS[2][2])*A;
output->VARS[23][JJ]=(SUMS[3][1]+SUMS[3][2])*A;
output->VARS[24][JJ]=(SUMS[4][1]+SUMS[4][2])*A;
output->VARS[25][JJ]=(SUMS[5][1]+SUMS[5][2])*A;
output->VARS[26][JJ]=(SUMS[6][1]+SUMS[6][2])*A;
output->VARS[27][JJ]=(SUMS[7][1]+SUMS[7][2])*A;
output->VARS[28][JJ]=(SUMS[9][1]+SUMS[9][2])*A;
output->VARS[29][JJ]=output->VARS[11][JJ]+output->VARS[13][JJ]+output->VARS[15][JJ]+output->VARS[33][JJ];
output->VARS[30][JJ]=output->VARS[12][JJ]+output->VARS[14][JJ]+output->VARS[16][JJ]+output->VARS[34][JJ];
output->VARS[31][JJ]=output->VARS[29][JJ]+output->VARS[30][JJ];
output->VARS[35][JJ]=output->VARS[33][JJ]+output->VARS[34][JJ];
            for(L=1;L<=gas->MSP;L++)
{
if(SUMS[1][1]>0)
{
output->VARS[35+L][JJ]=100*output->CSS[1][JJ][L][1]/SUMS[1][1];
}
else
{
output->VARS[35+L][JJ]=0.0;
}
}
}
}
//output->VARSP=0;
for(int i=0;i<13;i++){
for(int j=0;j<geom->NCELLS+1;j++){
for(int k=0;k<gas->MSP+1;k++)
output->VARSP[i][j][k]=0;
}
}
SMCR=0;
NMCR=0;
for(N=1;N<=geom->NCELLS;N++)
{
if(N==120)
{
continue;
}
        A=calc->FNUM/(geom->CELL[4][N]*output->NSAMP);
if(geom->IVB==1)
A=A*pow((geom->XB[2]-geom->XB[1])/(geom->XB[2]+geom->VELOB*0.5e00*(calc->FTIME-output->TISAMP)-geom->XB[1]),geom->IFX+1);
//--check the above for non-zero XB(1)
//dout
//memset(SUM,0,sizeof(SUM));
for(int i=0;i<14;i++)
SUM[i]=0;
NMCR+=1;
for(L=1;L<=gas->MSP;L++)
{
SUM[0]=SUM[0]+output->CS[0][N][L];
SUM[1]=SUM[1]+output->CS[1][N][L];
SUM[2]=SUM[2]+gas->SP[5][L]*output->CS[0][N][L];
for(K=1;K<=3;K++)
{
SUM[K+2]=SUM[K+2]+gas->SP[5][L]*output->CS[K+1][N][L];
if(output->CS[1][N][L]>1.1e00)
{
output->VARSP[K+1][N][L]=output->CS[K+4][N][L]/output->CS[1][N][L];
//--VARSP(2,3,4 are temporarily the mean of the squares of the velocities
output->VARSP[K+8][N][L]=output->CS[K+1][N][L]/output->CS[1][N][L];
}
}
SUM[6]=SUM[6]+gas->SP[5][L]*(output->CS[5][N][L]+output->CS[6][N][L]+output->CS[7][N][L]);
SUM[10]=SUM[10]+gas->SP[5][L]*output->CS[5][N][L];
            SUM[11]=SUM[11]+gas->SP[5][L]*output->CS[6][N][L];
SUM[12]=SUM[12]+gas->SP[5][L]*output->CS[7][N][L];
SUM[13]=SUM[13]+output->CS[9][N][L];
if(output->CS[1][N][L]>0.5e00)
SUM[7]=SUM[7]+output->CS[5][N][L]+output->CS[6][N][L]+output->CS[7][N][L];
if(gas->ISPR[1][L]>0)
{
SUM[8]=SUM[8]+output->CS[8][N][L];
SUM[9]=SUM[9]+output->CS[1][N][L]*gas->ISPR[1][L];
}
}
AVW=0;
for(L=1;L<=gas->MSP;L++)
{
output->VARSP[0][N][L]=output->CS[1][N][L];
output->VARSP[1][N][L]=0.e00;
output->VARSP[6][N][L]=0.0;
output->VARSP[7][N][L]=0.0;
output->VARSP[8][N][L]=0.0;
if(SUM[1]>0.1)
{
output->VARSP[1][N][L]=output->CS[1][N][L]/SUM[1];
AVW=AVW+gas->SP[3][L]*output->CS[1][N][L]/SUM[1];
if(gas->ISPR[1][L]>0 && output->CS[1][N][L]>0.5)
output->VARSP[6][N][L]=(2.e00/BOLTZ)*output->CS[8][N][L]/((double)(gas->ISPR[1][L])*output->CS[1][N][L]);
}
output->VARSP[5][N][L]=0;
for(K=1;K<=3;K++)
{
output->VARSP[K+1][N][L]=(gas->SP[5][L]/BOLTZ)*(output->VARSP[K+1][N][L]-pow(output->VARSP[K+8][N][L],2));
output->VARSP[5][N][L]=output->VARSP[5][N][L]+output->VARSP[K+1][N][L];
}
output->VARSP[5][N][L]=output->VARSP[5][N][L]/3.e00;
output->VARSP[8][N][L]=(3.e00*output->VARSP[5][N][L]+(double)gas->ISPR[1][L]*output->VARSP[6][N][L])/(3.e00+(double)(gas->ISPR[1][L]));
}
if(geom->IVB==0)
output->VAR[1][N]=geom->CELL[1][N];
if(geom->IVB==1)
{
C=(geom->XB[2]+geom->VELOB*calc->FTIME-geom->XB[1])/(double)(geom->NDIV); //new DDIV
output->VAR[1][N]=geom->XB[1]+((double)(N-1)+0.5)*C;
}
output->VAR[2][N]=SUM[0];
if(SUM[1]>0.5)
{
output->VAR[3][N]=SUM[1]*A;//--number density Eqn. (4.28)
output->VAR[4][N]=output->VAR[3][N]*SUM[2]/SUM[1]; //--density Eqn. (4.29)
output->VAR[5][N]=SUM[3]/SUM[2];//--u velocity component Eqn. (4.30)
output->VAR[6][N]=SUM[4]/SUM[2]; //--v velocity component Eqn. (4.30)
output->VAR[7][N]=SUM[5]/SUM[2]; //--w velocity component Eqn. (4.30)
UU= pow(output->VAR[5][N],2)+pow(output->VAR[6][N],2)+pow(output->VAR[7][N],2);
if(SUM[1]>1)
{
output->VAR[8][N]=(fabs(SUM[6]-SUM[2]*UU))/(3.e00*BOLTZ*SUM[1]); //Eqn. (4.39)
//--translational temperature
output->VAR[19][N]=fabs(SUM[10]-SUM[2]*pow(output->VAR[5][N],2))/(BOLTZ*SUM[1]);
output->VAR[20][N]=fabs(SUM[11]-SUM[2]*pow(output->VAR[6][N],2))/(BOLTZ*SUM[1]);
output->VAR[21][N]=fabs(SUM[12]-SUM[2]*pow(output->VAR[7][N],2))/(BOLTZ*SUM[1]);
}
else
{
output->VAR[8][N]=1.0;
output->VAR[19][N]=1.0;
output->VAR[20][N]=1.0;
output->VAR[21][N]=1.0;
}
if(SUM[9]>0.1e00)
{
output->VAR[9][N]=(2.e00/BOLTZ)*SUM[8]/SUM[9]; ////--rotational temperature Eqn. (4.36)
}
else
output->VAR[9][N]=0.0;
output->VAR[10][N]=gas->FTMP[1]; ////vibration default
            DOF=(3.e00+SUM[9]/SUM[1]);
            output->VAR[11][N]=(3.0*output->VAR[8][N]+(SUM[9]/SUM[1])*output->VAR[9][N])/DOF;
//--overall temperature based on translation and rotation
output->VAR[18][N]=output->VAR[3][N]*BOLTZ*output->VAR[8][N];
//--scalar pressure (now (from V3) based on the translational temperature)
if(gas->MMVM>0)
{
for(L=1;L<=gas->MSP;L++)
{
VDOF[L]=0.0;
//dout
if(gas->ISPV[L] > 0)
{
for(K=1;K<=gas->ISPV[L];K++)
{
if(output->CS[K+9][N][L]<BOLTZ)
{
TV[K][L]=0.0;
DF[N][K][L]=0.0;
}
else
{
TV[K][L]=gas->SPVM[1][K][L]/log(1.0+output->CS[1][N][L]/output->CS[K+9][N][L]) ;//--Eqn.(4.45)
DF[N][K][L]=2.0*(output->CS[K+9][N][L]/output->CS[1][N][L])*log(1.0+output->CS[1][N][L]/output->CS[K+9][N][L]); //--Eqn. (4.46)
}
VDOF[L]=VDOF[L]+DF[N][K][L];
}
//memset(TVIB,0.0,sizeof(*TVIB));
for(int i=0;i<gas->MSP+1;i++)
TVIB[i]=0.0;
for(K=1;K<=gas->ISPV[L];K++)
{
if(VDOF[L]>1.e-6)
{
TVIB[L]=TVIB[L]+TV[K][L]*DF[N][K][L]/VDOF[L];
}
else
TVIB[L]=gas->FVTMP[1];
}
}
else
{
TVIB[L]=calc->TREF;
VDOF[L]=0.0;
}
output->VARSP[7][N][L]=TVIB[L];
}
VDOFM=0.0;
TVIBM=0.0;
A=0.e00;
for(L=1;L<=gas->MSP;L++)
{
//dout
if(gas->ISPV[L] > 0)
{
A=A+output->CS[1][N][L];
}
}
for(L=1;L<=gas->MSP;L++)
{
//dout
if(gas->ISPV[L] > 0)
{
                        VDOFM=VDOFM+VDOF[L]*output->CS[1][N][L]/A;
                        TVIBM=TVIBM+TVIB[L]*output->CS[1][N][L]/A;
}
}
output->VAR[10][N]=TVIBM;
}
for(L=1;L<=gas->MSP;L++)
{
if(output->VARSP[0][N][L]>0.5)
{
//--convert the species velocity components to diffusion velocities
for(K=1;K<=3;K++)
{
output->VARSP[K+8][N][L]=output->VARSP[K+8][N][L]-output->VAR[K+4][N];
}
if(gas->MELE>1)
{
//--calculate the electronic temperatures for the species
//memset(ELDOF,0.e00,sizeof(*ELDOF));
for(int i=0;i<gas->MSP+1;i++)
ELDOF[i] = 0.e00;
//dout
//memset(TEL,0.e00,sizeof(*TEL));
for(int i=0;i<gas->MSP+1;i++)
TEL[i] = 0.e00;
if(gas->MELE>1)
{
A=0.e00;
B=0.e00;
for(M=1;M<=gas->NELL[L];M++)
{
if(output->VARSP[5][N][L]>1.e00)
{
C=gas->QELC[2][M][L]/output->VARSP[5][N][L];
A=A+gas->QELC[1][M][L]*exp(-C);
B=B+gas->QELC[1][M][L]*C*exp(-C);
}
}
if(B>1.e-10)
{
TEL[L]=output->CS[9][N][L]/output->CS[1][N][L]/(BOLTZ*B/A);
}
else
TEL[L]=output->VAR[11][N];
output->VARSP[12][N][L]=TEL[L];
ELDOF[L]=0.e00;
if(output->VARSP[5][N][L]>1.e00)
ELDOF[L]=2.e00*output->CS[9][N][L]/output->CS[1][N][L]/(BOLTZ*output->VARSP[5][N][L]);
if(ELDOF[L]<0.01)
{
output->VARSP[12][N][L]=output->VAR[11][N];
}
}
else
{
ELDOF[L]=0.0;
}
}
}
else
{
for(K=8;K<=12;K++)
{
output->VARSP[K][N][L]=0.e00;
}
}
}
//--set the overall electronic temperature
if(gas->MELE>1)
{
C=0.e00;
for(L=1;L<=gas->MSP;L++)
{
if(ELDOF[L]>1.e-5)
C=C+output->CS[1][N][L];
}
if(C>0.e00)
{
A=0.e00;
B=0.e00;
for(L=1;L<=gas->MSP;L++)
{
if(ELDOF[L]>1.e-5)
{
A=A+output->VARSP[12][N][L]*output->CS[1][N][L];
B=B+output->CS[1][N][L];
}
}
output->VAR[22][N]=A/B;
}
else{
output->VAR[22][N]=output->VAR[11][N];
}
}
else{
output->VAR[22][N]=gas->FTMP[1];
}
if(gas->MMVM>0)
{
//--set the overall temperature and degrees of freedom for the individual species
for(L=1;L<=gas->MSP;L++)
{
if(gas->MELE>1){
SDOF[L]=3.e00+gas->ISPR[1][L]+VDOF[L]+ELDOF[L];
output->VARSP[8][N][L]=(3.0*output->VARSP[5][N][L]+gas->ISPR[1][L]*output->VARSP[6][N][L]+VDOF[L]*output->VARSP[7][N][L]+ELDOF[L]*output->VARSP[12][N][L])/SDOF[L];
}
else{
SDOF[L]=3.e00+gas->ISPR[1][L]+VDOF[L]+ELDOF[L];
output->VARSP[8][N][L]=(3.0*output->VARSP[5][N][L]+gas->ISPR[1][L]*output->VARSP[6][N][L]+VDOF[L]*output->VARSP[7][N][L])/SDOF[L];
}
}
//--the overall species temperature now includes vibrational and electronic excitation
//--the overall gas temperature can now be set
A=0.e00;
B=0.e00;
for(L=1;L<=gas->MSP;L++)
{
                    A=A+SDOF[L]*output->VARSP[8][N][L]*output->CS[1][N][L];
B=B+SDOF[L]*output->CS[1][N][L];
}
output->VAR[11][N]=A/B;
}
VEL=sqrt(pow(output->VAR[5][N],2)+pow(output->VAR[6][N],2)+pow(output->VAR[7][N],2));
            output->VAR[12][N]=VEL/sqrt((DOF+2.e00)*output->VAR[11][N]*(SUM[1]*BOLTZ/SUM[2])/DOF);
//--Mach number
output->VAR[13][N]=SUM[0]/output->NSAMP; ////--average number of molecules in cell
//dout
if(output->COLLS[N] > 2.0)
{
output->VAR[14][N]=0.5e00*(calc->FTIME-output->TISAMP)*(SUM[1]/output->NSAMP)/output->WCOLLS[N];
//--mean collision time
output->VAR[15][N]=0.92132e00*sqrt(fabs(SUM[7]/SUM[1]-UU))*output->VAR[14][N];
//--mean free path (based on r.m.s speed with correction factor based on equilibrium)
output->VAR[16][N]=output->CLSEP[N]/(output->COLLS[N]*output->VAR[15][N]);
}
else{
output->VAR[14][N]=1.e10;
output->VAR[15][N]=1.e10/output->VAR[3][N];
//--m.f.p set by nominal values
}
}
else
{
for(L=3;L<=22;L++)
{
output->VAR[L][N]=0.0;
}
}
output->VAR[17][N]=VEL;
}
if(calc->FTIME>0.e00*calc->DTM)
{
if(calc->ICLASS==1){
if(geom->IFX==0)
file_3<<"DSMC program for a one-dimensional plane flow"<<endl;//WRITE (3,*) 'DSMC program for a one-dimensional plane flow';
if(geom->IFX==1)
file_3<<"DSMC program for a cylindrical flow"<<endl;//WRITE (3,*) 'DSMC program for a one-dimensional plane flow';
if(geom->IFX==2)
file_3<<"DSMC program for a spherical flow"<<endl;//WRITE (3,*) 'DSMC program for a one-dimensional plane flow';
}
file_3<<endl;//WRITE (3,*)
file_3<<"Interval "<<output->NOUT<<" Time "<<calc->FTIME<< " with "<<output->NSAMP<<" samples from "<<output->TISAMP<<endl;
//WRITE (3,*) 'Interval',output->NOUT,'Time ',calc->FTIME, ' with',output->NSAMP,' samples from',output->TISAMP
//990 FORMAT(I7,G13.5,I7,G13.5)
//Dout
NNN=calc->TOTMOV;
cout<<"TOTAL MOLECULES = "<< molecs->NM<<endl;
//dout
//NMS=0;
for(int i=0;i<gas->MSP+1;i++)
NMS[i]=0;
for(N=1;N<=molecs->NM;N++)
{
M=molecs->IPSP[N];
NMS[M]+=1;
}
file_3<<"Total simulated molecules = "<<molecs->NM<<endl;
for(N=1;N<=gas->MSP;N++)
{
cout<< " SPECIES "<<N<<" TOTAL = "<<NMS[N]<<endl;
file_3<<"Species "<<N<<" total = "<<NMS[N]<<endl;
}
if(gas->MEX>0)
{
ENERGY(0,A);
for(N=1;N<=gas->MSP;N++)
{
if(gas->ISPV[N]>0){
file_9<< "SP "<<N<<" DISSOCS "<<calc->TDISS[N]<<" RECOMBS "<<calc->TRECOMB[N]<<endl;
cout<<"SP"<<N<<"DISSOCS"<<calc->TDISS[N]<<" RECOMBS "<<calc->TRECOMB[N]<<endl;
file_3<<"SP "<<N<<" DISSOCS "<<calc->TDISS[N]<<" RECOMBS "<<calc->TRECOMB[N]<<endl;
}
}
for(N=1;N<=gas->MSP;N++)
{
cout<<"EX,C reaction"<<N<<" number"<<gas->TNEX[N]<<endl;
file_9<<"EX,C reaction "<<N<<" number "<<gas->TNEX[N]<<endl;
file_3<<"EX,C reaction "<<N<<" number "<<gas->TNEX[N]<<endl;
}
}
file_3<<"Total molecule moves = "<<NNN<<endl;
//dout
NNN=calc->TOTCOL;
file_3<<"Total collision events = "<<NNN<<endl;
//
file_3<<"Species dependent collision numbers in current sample"<<endl;
for(N=1;N<=gas->MSP;N++)
{
if(gas->IGAS!=8){
for(M=1;M<=gas->MSP;M++)
file_3<<calc->TCOL[N][M]<<"\t";
file_3<<endl;
//WRITE(3,901) (calc->TCOL[N][M],M=1,gas->MSP);
}
if(gas->IGAS==8){
for(M=1;M<=gas->MSP;M++)
file_3<<calc->TCOL[N][M]<<"\t";
file_3<<endl;
// WRITE(3,902) (calc->TCOL[N][M],M=1,gas->MSP);
}
}
//Dout
//901 FORMAT(5G13.5)
//902 FORMAT(8G13.5)
//dout
CTIME=clock();
file_3<<"Computation time "<<(double)CTIME/1000.0<< "seconds"<<endl;
file_3<<"Collision events per second "<<(calc->TOTCOL-calc->TOTCOLI)*1000.e00/(double)CTIME<<endl;
file_3<<"Molecule moves per secon "<<(calc->TOTMOV-calc->TOTMOVI)*1000.e00/(double)CTIME<<endl;
if(calc->ICLASS==0&& gas->MMVM==0&&calc->ISF==0){
            //--a homogeneous gas with no vibrational modes - assume that it is a collision test run
//******PRODUCES DATA FOR TABLES 6.1 AND 6.2 IN SECTION 6.2*******
//
A=0.e00;
B=0.e00;
C=0.e00;
for(N=1;N<=geom->NCCELLS;N++)
{
A+=geom->CCELL[5][N];
B+=geom->CCELL[4][N];
C+=geom->CCELL[3][N];
}
file_3<<"Overall time step "<<calc->DTM<<endl;
file_3<<"Molecules per collision cell "<<(double)(molecs->NM)/(double)(geom->NCCELLS)<<endl;
file_3<<"Mean cell time ratio "<< A/((double)(geom->NCCELLS)*calc->FTIME)<<endl;
file_3<<"Mean value of cross-section and relative speed "<<B/(double)(geom->NCCELLS)<<endl;
file_3<<"Mean half collision cell time step "<<C/(double)(geom->NCCELLS)<<endl;
if(gas->MSP==1){
A=2.e00*SPI*output->VAR[3][1] *(pow(gas->SP[1][1],2))*sqrt(4.e00*BOLTZ*gas->SP[2][1]/gas->SP[5][1])*pow((output->VAR[11][1])/gas->SP[2][1],(1.e00-gas->SP[3][1]));
                //--Eqn. (2.33) for equilibrium collision rate
file_3<<"Coll. rate ratio to equilib "<<calc->TCOL[1][1]/((double)(molecs->NM)*(calc->FTIME-output->TISAMP))/A<<endl;
}
else{
file_3<<"Species collision rate ratios to equilibrium"<<endl;
for(N=1;N<=gas->MSP;N++){
file_3<<"Collision rate for species "<<N<<endl;
for(M=1;M<=gas->MSP;M++)
{
THCOL[N][M]=2.e00*(1.e00/SPI)*output->VAR[3][1]*output->VARSP[1][1][M]*gas->SPM[2][N][M]*sqrt(2.e00*BOLTZ*gas->SPM[5][N][M]/gas->SPM[1][N][M])*pow(output->VAR[11][1]/gas->SPM[5][N][M],1.e00-gas->SPM[3][N][M]);
                        //--Eqn. (2.36) for equilibrium collision rate of species N with species M
file_3<<"with species "<<M<<" "<<calc->TCOL[N][M]/((double)(molecs->NM)*gas->FSP[N][1]*(calc->FTIME-output->TISAMP))/THCOL[N][M]<<endl;
}
}
file_3<<endl;
for(N=1;N<=gas->MSP;N++){
file_3<<"Collision numbers for species "<<N<<endl;
for(M=1;M<=gas->MSP;M++){
file_3<<"with species "<<M<<" "<<calc->TCOL[N][M]<<endl;
}
}
}
}
file_3<<endl;
        if(geom->ITYPE[1]==2|| geom->ITYPE[2]==2)
file_3<<"Surface quantities"<<endl;
for(JJ=1;JJ<=2;JJ++)
{
if(geom->ITYPE[JJ]==2){
file_3<<endl;
file_3<<"Surface at "<<geom->XB[JJ]<<endl;
file_3<<"Incident sample "<<output->VARS[0][JJ]<<endl;
file_3<<"Number flux "<<output->VARS[3][JJ]<<" /sq m/s"<<endl;
file_3<<"Inc pressure "<<output->VARS[5][JJ]<<" Refl pressure "<<output->VARS[6][JJ]<<endl;
file_3<<"Pressure "<< output->VARS[5][JJ]+output->VARS[6][JJ]<<" N/sq m"<<endl;
file_3<<"Inc y shear "<<output->VARS[7][JJ]<<" Refl y shear "<<output->VARS[8][JJ]<<endl;
file_3<<"Net y shear "<<output->VARS[7][JJ]-output->VARS[8][JJ]<<" N/sq m"<<endl;
file_3<<"Net z shear "<<output->VARS[9][JJ]-output->VARS[10][JJ]<<" N/sq m"<<endl;
file_3<<"Incident translational heat flux "<<output->VARS[11][JJ]<<" W/sq m"<<endl;
if(gas->MMRM>0)
file_3<<"Incident rotational heat flux "<<output->VARS[13][JJ]<<" W/sq m"<<endl;
if(gas->MMVM>0)
file_3<<"Incident vibrational heat flux "<<output->VARS[15][JJ]<<" W/sq m"<<endl;
if(gas->MELE>1)
file_3<<"Incident electronic heat flux "<<output->VARS[33][JJ]<<" W/sq m"<<endl;
file_3<<"Total incident heat flux "<<output->VARS[29][JJ]<<" W/sq m"<<endl;
file_3<<"Reflected translational heat flux "<<output->VARS[12][JJ]<<" W/sq m"<<endl;
if(gas->MMRM>0)
file_3<<"Reflected rotational heat flux "<<output->VARS[14][JJ]<<" W/sq m"<<endl;
if(gas->MMVM>0)
file_3<<"Reflected vibrational heat flux "<<output->VARS[16][JJ]<<" W/sq m"<<endl;
if(gas->MELE>1)
file_3<<"Reflected electronic heat flux "<<output->VARS[34][JJ]<<" W/sq m"<<endl;
file_3<<"Total reflected heat flux "<<output->VARS[30][JJ]<<" W/sq m"<<endl;
file_3<<"Net heat flux "<<output->VARS[31][JJ]<<" W/sq m"<<endl;
file_3<<"Slip velocity (y direction) "<<output->VARS[19][JJ]<<" m/s"<<endl;
file_3<<"Translational temperature slip"<<output->VARS[20][JJ]<<" K"<<endl;
if(gas->MMRM>0)
file_3<<"Rotational temperature slip "<<output->VARS[21][JJ]<<" K"<<endl;
if(gas->MSP>1)
{
for(L=1;L<=gas->MSP;L++)
{
file_3<<"Species "<<L<<" percentage "<<output->VARS[L+35][JJ]<<endl;
}
}
}
}
file_3<<endl;
//PPA=0;
for(int i=0;i<gas->MSP+1;i++)
PPA[i]=0;
for(N=1;N<=geom->NCELLS;N++)
{
for(M=1;M<=gas->MSP;M++){
PPA[M]=PPA[M]+output->VARSP[0][N][M];
}
}
// WRITE (*,*)
//cin.get();
if(gas->MSP>1)
{
file_3<<"GAINS FROM REACTIONS"<<endl;
file_3<<" Dissoc. Recomb. Endo. Exch. Exo. Exch."<<endl;
for(M=1;M<=gas->MSP;M++){
file_3<<" SPECIES "<<M<<" "<<gas->TREACG[1][M]<<" "<<gas->TREACG[2][M]<<" "<<gas->TREACG[3][M]<<" "<<gas->TREACG[4][M]<<endl;
}
file_3<<endl;
file_3<<"LOSSES FROM REACTIONS"<<endl;
file_3<<" Dissoc. Recomb. Endo. Exch. Exo. Exch."<<endl;
for(M=1;M<=gas->MSP;M++){
file_3<<" SPECIES "<<M<<" "<<gas->TREACL[1][M]<<" "<<gas->TREACL[2][M]<<" "<<gas->TREACL[3][M]<<" "<<gas->TREACL[4][M]<<endl;
}
file_3<<endl;
file_3<<"TOTALS"<<endl;
for(M=1;M<=gas->MSP;M++){
file_3<<" SPECIES "<<M<<" GAINS "<<gas->TREACG[1][M]+gas->TREACG[2][M]+gas->TREACG[3][M]+gas->TREACG[4][M]<<" LOSSES "<<gas->TREACL[1][M]+gas->TREACL[2][M]+gas->TREACL[3][M]+gas->TREACL[4][M]<<endl;
}
}
file_3<<endl;
file_3<<"Flowfield properties "<<endl;
file_3<< output->NSAMP<<" Samples"<<endl;
file_3<<"Overall gas"<<endl;
file_3<<"Cell x coord. Sample Number Dens. Density u velocity v velocity w velocity Trans. Temp. Rot. Temp. Vib. Temp. El. Temp. Temperature Mach no. Mols/cell m.c.t m.f.p mcs/mfp speed Pressure TTX TTY TTZ Species Fractions "<<endl;
for(N=1;N<=geom->NCELLS;N++)
{
file_3<< N<<" ";
for(M=1;M<=10;M++){
file_3<<output->VAR[M][N]<<" ";
}
file_3<<output->VAR[22][N]<<" ";
for(M=11;M<=21;M++){
file_3<<output->VAR[M][N]<<" ";
}
            for(L=1;L<=gas->MSP;L++){
file_3<<output->VARSP[1][N][L]<<" ";
}
file_3<<endl;
}
file_3<<"Individual molecular species"<<endl;
for(L=1;L<=gas->MSP;L++){
file_3<<"Species "<<L<<endl;
file_3<<"Cell x coord. Sample Percentage Species TTx Species TTy Species TTz Trans. Temp. Rot. Temp. Vib. Temp. Spec. Temp u Diff. Vel. v Diff. Vel. w. Diff. Vel. Elec. Temp."<<endl;
for(N=1;N<=geom->NCELLS;N++){
file_3<< N<<" "<<output->VAR[1][N]<<" ";
for(M=0;M<=12;M++)
file_3<<output->VARSP[M][N][L]<<" ";
file_3<<endl;
}
}
//dout
//999 FORMAT (I5,30G13.5)
//998 FORMAT (G280.0)
// 997 FORMAT (G188.0)
// CLOSE (3)
file_3.close();
}
if(calc->ICLASS==0 && calc->ISF==1){
//--a homogeneous gas and the "unsteady sampling" option has been chosen-ASSUME THAT IT IS A RELAXATION TEST CASE FOR SECTION 6.2
INITIALISE_SAMPLES();
//write a special output file for internal temperatures and temperature versus collision number
//dout
file_10.open("RELAX.DAT", ios::app | ios::out);
if(file_10.is_open()){
cout<<"RELAX.DAT is opened"<<endl;
}
else{
cout<<"RELAX.DAT not opened"<<endl;
}
// OPEN (10,FILE='RELAX.DAT',ACCESS='APPEND')
A=2.0*calc->TOTCOL/molecs->NM; //--mean collisions
//--VAR(11,N) //--overall
//--VAR(8,N) //--translational
//--VAR(9,N) //--rotational
//--VAR(10,N) //--vibrational
//--VAR(22,N) //--electronic
file_10<<setw(15)<<A<<setw(15)<<output->VAR[8][1]<<setw(15)<<output->VAR[9][1]<<setw(15)<<output->VAR[8][1]-output->VAR[9][1]<<endl;
//file_10<<A<<"\t"<<output->VAR[11][1]<<"\t"<<output->VAR[8][1]<<"\t"<<output->VAR[9][1]<<"\t"<<output->VAR[10][1]<<"\t"<<output->VAR[22][1]<<endl;
//file_10<<A<<"\t"<<output->VAR[8][1]<<"\t"<<output->VAR[9][1]<<"\t"<<output->VAR[8][1]-output->VAR[9][1]<<endl;
// WRITE (10,950) A,VAR(8,1),VAR(9,1),VAR(8,1)-VAR(9,1) //--Generates output for Figs. 6.1 and 6.2
// WRITE (10,950) A,VAR(11,1),VAR(8,1),VAR(9,1),VAR(10,1),VAR(22,1) //--Generates output for modal temperatures in Figs. 6.3, 6.5 +
// WRITE (10,950) A,0.5D00*(VAR(8,1)+VAR(9,1)),VAR(10,1),0.5D00*(VAR(8,1)+VAR(9,1))-VAR(10,1) //--Generates output for Figs. 6.4
//
//--VARSP(8,N,L) //--overall temperature of species L
// WRITE (10,950) A,VARSP(8,1,3),VARSP(8,1,2),VARSP(8,1,5),VARSP(8,1,4),A //--output for Fig 6.17
// CLOSE (10)
file_10.close();
}
//dout
// 950 FORMAT (6G13.5)
if(gas->IGAS==8||gas->IGAS==6||gas->IGAS==4)
{
//--Write a special output file for the composition of a reacting gas as a function of time
//dout
//OPEN (10,FILE='COMPOSITION.DAT',ACCESS='APPEND')
file_10.open("COMPOSITION.DAT", ios::app | ios::out);
if(file_10.is_open()){
cout<<"COMPOSITION.DAT is opened"<<endl;
}
else{
cout<<"COMPOSITION.DAT not opened"<<endl;
}
AS=molecs->NM;
//dout
AT=calc->FTIME*1.e6;
if (gas->IGAS == 4)
file_10<< AT <<" "<<(double)(NMS[1])/1000000<<" "<<A<<" "<<output->VAR[11][1]<<endl; //--Data for fig
if (gas->IGAS == 8)
file_10<<AT<<" "<<NMS[1]/AS<<" "<<NMS[2]/AS<<" "<<NMS[3]/AS<<" "<<NMS[4]/AS<<" "<<NMS[5]/AS<<" "<<NMS[6]/AS<<" "<<NMS[7]/AS<<" "<<NMS[8]/AS<<" "<<output->VAR[11][1]<<endl;
if (gas->IGAS == 6)
file_10<<AT<<" "<<NMS[1]/AS<<" "<<NMS[2]/AS<<" "<<NMS[3]/AS<<" "<<NMS[4]/AS<<" "<<NMS[5]/AS<<" "<<output->VAR[11][1]<<endl;
//dout
// 888 FORMAT(10G13.5)
file_10.close();
}
if(calc->FTIME>0.5e00*calc->DTM){
//
//--reset collision and transit times etc.
//
cout<<"Output files written "<<endl;
DTMI=calc->DTM;
if(calc->IMTS<2){
if(calc->ICLASS>0)
calc->DTM*=2;
//--this makes it possible for DTM to increase, it will be reduced as necessary
for(NN=1;NN<=geom->NCELLS;NN++)
{
CDTM[NN]=calc->DTM;
B=geom->CELL[3][NN]-geom->CELL[2][NN] ;//--sampling cell width
if(output->VAR[13][NN]>20.e00){
//consider the local collision rate
CDTM[NN]=output->VAR[14][NN]*calc->CPDTM;
//look also at sampling cell transit time based on the local flow speed
A=(B/(fabs(output->VAR[5][NN])))*calc->TPDTM;
if(A<CDTM[NN])
CDTM[NN]=A;
}
else{
                    //-- base the time step on a sampling cell transit time at the reference vmp
A=calc->TPDTM*B/gas->VMPM;
if(A<CDTM[NN])
CDTM[NN]=A;
}
if(CDTM[NN]<calc->DTM)
calc->DTM=CDTM[NN];
}
}
else
{
//dout
//memset(CDTM, calc->DTM, sizeof(*CDTM));
for(int i=0;i<geom->NCELLS+1;i++)
CDTM[i]= calc->DTM;
//CDTM=calc->DTM;
}
        for(N=1;N<=geom->NCCELLS;N++){ //set the half time step of every collision cell from its sampling cell
NN=geom->ICCELL[3][N];
geom->CCELL[3][N]=0.5*CDTM[NN];
}
file_9<<"DTM changes from "<<DTMI<<" to "<<calc->DTM<<endl;
calc->DTSAMP=calc->DTSAMP*calc->DTM/DTMI;
calc->DTOUT=calc->DTOUT*calc->DTM/DTMI;
}
else
{
INITIALISE_SAMPLES();
}
if(calc->ICLASS==1&& calc->ISF==1)
{
//*************************************************************************
//--write TECPLOT data files for x-t diagram (unsteady calculation only)
//--comment out if not needed
//dout
file_18.open("DS1xt.DAT", ios::app | ios::out);
if(file_18.is_open()){
cout<<"DS1xt.DAT is opened"<<endl;
}
else
cout<<"DS1xt.DAT not opened"<<endl;
// OPEN (18,FILE='DS1xt.DAT',ACCESS='APPEND')
        //--make sure that it is empty at the start of the run
SETXT();
// CLOSE (18)
file_18.close();
//**************************************************************************
}
//WRITE (19,*) calc->FTIME,-output->VARS[5][1],-output->VARS[5][1]-output->VARS[6][1]
file_7.open("PROFILE.DAT" , ios::out);
if(file_7.is_open()){
cout<<"PROFILE.DAT is opened"<<endl;
}
else
cout<<"PROFILE.DAT not opened"<<endl;
// OPEN (7,FILE='PROFILE.DAT',FORM='FORMATTED')
//
//OPEN (8,FILE='ENERGYPROF.DAT',FORM='FORMATTED')
//
// 995 FORMAT (22G13.5)
// 996 FORMAT (12G14.6)
for(N=1;N<=geom->NCELLS;N++)
{
//
//--the following line is the default output
// WRITE (7,995) VAR(1,N),VAR(4,N),VAR(3,N),VAR(11,N),VAR(18,N),VAR(5,N),VAR(12,N),VAR(8,N),VAR(9,N),VAR(10,N),VAR(22,N), &
// (VARSP(8,N,M),M=1,MSP),(VARSP(1,N,M),M=1,MSP)
//
//--calculate energies per unit mass (employed for re-entry shock wave in Section 7.5)
C1=0.5e00*pow(output->VAR[5][N],2); //--Kinetic
C2=0.e00; //--Thermal
C3=0.e00; //--Rotational
C4=0.e00; //--Vibrational
C5=0.e00; //--Electronic
C6=0.e00; //--Formation
for(L=1;L<=gas->MSP;L++)
{
// C2=C2+3.D00*BOLTZ*VARSP(5,N,L)*VARSP(1,N,L)/SP(5,L)
A=(output->CS[1][N][L]/output->VARSP[1][N][L])*gas->SP[5][L];
if(output->CS[1][N][L]>0.5e00){
C2=C2+0.5e00*(output->CS[5][N][L]+output->CS[6][N][L]+output->CS[7][N][L])*gas->SP[5][L]/A;
if(gas->ISPR[1][L]>0)
C3=C3+output->CS[8][N][L];
if(gas->ISPV[L]>0)
C4=C4+output->CS[10][N][L]*BOLTZ*gas->SPVM[1][1][L]/A;
if(gas->NELL[L]>1)
C5=C5+output->CS[9][N][L]/A;
C6=C6+gas->SP[6][L]*output->CS[1][N][L]/A;
}
}
C2=C2-C1;
// A=0.5D00*VFX(1)**2+2.5D00*BOLTZ*FTMP(1)/(0.75*SP(5,2)+0.25*SP(5,1))
C7=C1+C2+C3+C4+C5+C6;
//
// WRITE (8,995) VAR(1,N),C1/A,C2/A,C3/A,C4/A,C5/A,C6/A,C7/A
//
//--the following lines are for normalised shock wave output in a simple gas (Sec 7.3)
C1=gas->FND[2]-gas->FND[1];
C2=gas->FTMP[2]-gas->FTMP[1];
file_7<<output->VAR[1][N]<<" "<<output->VAR[2][N]<<" "<<(0.5*(output->VAR[20][N]+output->VAR[21][N])-gas->FTMP[1])/C2<<" "<<(output->VAR[19][N]-gas->FTMP[1])/C2<<" "<<(output->VAR[11][N]-gas->FTMP[1])/C2<<" "<<(output->VAR[3][N]-gas->FND[1])/C1<<endl;
//--the following replaces sample size with density
//C3=0.D00
//DO L=1,MSP
// C3=C3+FND(1)*FSP(L,1)*SP(5,L) //--upstream density
//END DO
//C4=0.D00
//DO L=1,MSP
// C4=C4+FND(2)*FSP(L,2)*SP(5,L) //--upstream density
//END DO
//
// WRITE (7,996) VAR(1,N),(VAR(4,N)-C3)/(C4-C3),(0.5*(VAR(20,N)+VAR(21,N))-FTMP(1))/C2,(VAR(19,N)-FTMP(1))/C2,(VAR(11,N)-FTMP(1))/C2, &
// (VAR(3,N)-FND(1))/C1
//--the following lines is for a single species in a gas mixture
// C1=C1*FSP(3,1)
// WRITE (7,996) VAR(1,N),VARSP(1,N,3),(0.5*(VARSP(3,N,3)+VARSP(4,N,3))-FTMP(1))/C2,(VARSP(2,N,3)-FTMP(1))/C2,(VARSP(5,N,3)-FTMP(1))/C2,(VAR(3,N)*VARSP(1,N,3)-FND(1)*FSP(3,1))/C1
//
//--the following line is for Couette flow (Sec 7.4)
// WRITE (7,996) VAR(1,N),VAR(2,N),VAR(5,N),VAR(6,N),VAR(7,N),VAR(11,N)
//--the following line is for the breakdown of equilibrium in expansions (Sec 7.10)
// WRITE (7,996) VAR(1,N),VAR(2,N),VAR(12,N),VAR(4,N),VAR(5,N),VAR(8,N),VAR(9,N),VAR(10,N),VAR(11,N),VAR(19,N),VAR(20,N),VAR(21,N)
//
}
if(calc->ISF==1)
INITIALISE_SAMPLES();
// CLOSE(7)
file_7.close();
//
//--deallocate local variables
//
//dout
for(int i=0;i<gas->MMVM+1;i++){
delete [] TV[i];
}
delete [] TV;
delete [] TVIB;
delete [] VDOF;
for(int i=0;i<gas->MSP+1;i++){
delete [] THCOL[i];
}
delete [] THCOL;
// DEALLOCATE (TV,TVIB,VDOF,THCOL,STAT=ERROR)
// if(calc->ERROR)
// cout<<"PROGRAM COULD NOT DEALLOCATE OUTPUT VARIABLES"<<calc->ERROR;
calc->TOUT=calc->TOUT+calc->DTOUT;
return;
}
void COLLISIONS()
{
//CALC calc;
//MOLECS molecs;
//GAS gas;
//OUTPUT output;
//GEOM_1D geom;
start= clock();
double duration;
int N,NN,M,MM,L,LL,K,KK,KT,J,I,II,III,NSP,MAXLEV,IV,NSEL,KV,LS,MS,KS,JS,IIII,LZ,KL,IS,IREC,NLOOP,IA,IDISS,IEX,NEL,NAS,NPS,
JJ,LIMLEV,KVV,KW,INIL,INIM,JI,LV,IVM,NMC,NVM,LSI,JX,MOLA,KR,JKV,NSC,KKV,IAX,NSTEP,NTRY,NLEVEL,NSTATE,IK,NK,MSI ;
double A,AA,AAA,AB,B,BB,BBB,ABA,ASEL,DTC,SEP,VR,VRR,ECT,EVIB,ECC,ZV,ERM,C,OC,SD,D,CVR,PROB,RML,RMM,ECTOT,ETI,EREC,ET2,
XMIN,XMAX,WFC,CENI,CENF,VRRT,EA,DEN,E1,E2,VRI,VRA ;
double VRC[4],VCM[4],VRCP[4],VRCT[4];
// //N,M,K working integer
// //LS,MS,KS,JS molecular species
// //VRC components of the relative velocity
// //RML,RMM molecule mass parameters
// //VCM components of the center of mass velocity
// //VRCP post-collision components of the relative velocity
// //SEP the collision partner separation
// //VRR the square of the relative speed
// //VR the relative speed
// //ECT relative translational energy
// //EVIB vibrational energy
// //ECC collision energy (rel trans +vib)
// //MAXLEV maximum vibrational level
// //ZV vibration collision number
// //SDF the number of degrees of freedom associated with the collision
// //ERM rotational energy
// //NSEL integer number of selections
// //NTRY number of attempts to find a second molecule
// //CVR product of collision cross-section and relative speed
// //PROB a probability
// //KT third body molecule code
// //ECTOT energy added at recombination
// //IREC initially 0, becomes 1 if a recombination occurs
// //WFC weighting factor in the cell
// //IEX is the reaction that occurs (1 if only one is possible)
// //EA activation energy
// //NPS the number of possible electronic states
// //NAS the number of available electronic states
//cout<<"START COLLISIONS"<<endl;
// dout
cout<<geom->XB[1]<<" "<<geom->XB[2]<<endl;
for( N=1;N<=geom->NCCELLS;N++)
{
if((calc->FTIME-geom->CCELL[5][N])>geom->CCELL[3][N])
{
// cout<<N <<" "<<geom->CCELL[3][N]<<endl;
DTC=2.e00*geom->CCELL[3][N];
//calculate collisions appropriate to time DTC
if(geom->ICCELL[2][N]>1)
{
//no collisions calculated if there are less than two molecules in collision cell
NN=geom->ICCELL[3][N];
WFC=1.e00;
if(geom->IWF==1 && geom->IVB==0)
{
//dout
WFC=1.e00+geom->WFM*pow(geom->CELL[1][NN],geom->IFX);
}
geom->CCELL[5][N]=geom->CCELL[5][N]+DTC;
if(geom->IVB==0)
{
AAA=geom->CCELL[1][N];
}
if(geom->IVB==1)
{
C=(geom->XB[2]+geom->VELOB*calc->FTIME-geom->XB[1])/(double)(geom->NDIV*geom->NCIS);
//dout
XMIN=geom->XB[1]+(double)(N-1)*C;
XMAX=XMIN+C;
//dout
WFC=1.e00+geom->WFM*pow((0.5e00*(XMIN+XMAX)),geom->IFX);
if(geom->IFX==0)
{
AAA=XMAX-XMIN;
}
if(geom->IFX==1)
{
AAA=PI*(pow(XMAX,2)-pow(XMIN,2)); //assumes unit length of full cylinder
}
if(geom->IFX==2)
{
AAA=1.33333333333333333333e00*PI*(pow(XMAX,3)-pow(XMIN,3)); //flow is in the full sphere
}
}
//these statements implement the N(N-1) scheme
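//(NTC method: ASEL = 0.5*N*(N-1)*WFC*FNUM*(sigma_T*cr)_max*DTC/V_cell candidate pairs are
//selected for this collision cell; the fractional part is carried over in CCELL[2][N] so
//that the time-averaged number of selections is exact)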
ASEL=0.5e00*geom->ICCELL[2][N]*(geom->ICCELL[2][N]-1)*WFC*calc->FNUM*geom->CCELL[4][N]*DTC/AAA+geom->CCELL[2][N];
NSEL=ASEL;
//dout
geom->CCELL[2][N]=ASEL-(double)(NSEL);
if(NSEL>0)
{
I=0; //counts the number of selections
KL=0; //becomes 1 if it is the last selection
IIII=0; //becomes 1 if there is a recombination
for(KL=1;KL<=NSEL;KL++)
{
I=I+1;
III=0; //becomes 1 if there is no valid collision partner
if(geom->ICCELL[2][N]==2)
{
K=1+geom->ICCELL[1][N];
//dout
L=molecs->ICREF[K];
K=2+geom->ICCELL[1][N];
//dout
M=molecs->ICREF[K];
if(M==molecs->IPCP[L])
{
III=1;
geom->CCELL[5][N]=geom->CCELL[5][N]-DTC;
}
}
else
{
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
K=(int)(calc->RANF*(double)(geom->ICCELL[2][N]))+geom->ICCELL[1][N]+1;
//dout
L=molecs->ICREF[K];
//one molecule has been selected at random
if(calc->NNC==0)
{
//select the collision partner at random
M=L;
NTRY=0;
while(M==L)
{
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
K=(int)(calc->RANF*(double)(geom->ICCELL[2][N]))+geom->ICCELL[1][N]+1;
M=molecs->ICREF[K];
if(M==molecs->IPCP[L])
{
if(NTRY<5*geom->ICCELL[2][N])
{
M=L;
}
else
{
III = 1;
geom->CCELL[5][N]=geom->CCELL[5][N]-DTC/ASEL;
M=L+1;
}
}
}
}
else
{
//select the nearest from the total number (< 30) or a random 30
if(geom->ICCELL[2][N]<30)
{
LL=geom->ICCELL[2][N];
}
else
{
LL=30;
}
SEP=1.0e10;
M=0;
for(J=1;J<=LL;J++)
{
if(LL<30)
{
K=J+geom->ICCELL[1][N];
}
else
{
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
K=(int)(calc->RANF*(double)(geom->ICCELL[2][N]))+geom->ICCELL[1][N]+1;
}
MM=molecs->ICREF[K];
if(MM != L)
{
//exclude the already selected molecule
if(MM != molecs->IPCP[L])
{
//exclude the previous collision partner
//dout
A=fabs(molecs->PX[1][L]-molecs->PX[1][MM]);
if(A<SEP&& A>1.e-8*geom->DDIV)
{
M=MM;
SEP=A;
}
}
}
}
}
}
if(III==0)
{
for(KK=1;KK<=3;KK++)
{
VRC[KK]=molecs->PV[KK][L]-molecs->PV[KK][M];
}
VRR=VRC[1]*VRC[1]+VRC[2]*VRC[2]+VRC[3]*VRC[3];
VR=sqrt(VRR);
VRI=VR;
//Simple GAs
if(gas->MSP==1)
{
//dout
CVR=VR*gas->CXSS*pow(2.e00*BOLTZ*gas->SP[2][1]/(gas->RMAS*VRR),(gas->SP[3][1]-0.5e00))*gas->RGFS;
if(CVR>geom->CCELL[4][N])
{
geom->CCELL[4][N]=CVR;
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF<CVR/geom->CCELL[4][N])
{
// the collision occurs
if(M==molecs->IPCP[L]&& L==molecs->IPCP[M])
{
file_9<<"Duplicate collision"<<endl;
}
calc->TOTCOL=calc->TOTCOL+1.e00;
calc->TCOL[1][1]=calc->TCOL[1][1]+2.e00;
output->COLLS[NN]=output->COLLS[NN]+1.e000;
output->WCOLLS[NN]=output->WCOLLS[NN]+WFC;
//dout
SEP=fabs(molecs->PX[1][L]-molecs->PX[1][M]);
output->CLSEP[NN]=output->CLSEP[NN]+SEP;
if(gas->ISPR[1][1]>0)
{
//Larsen-Borgnakke serial redistribution
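//(each internal mode is treated in turn: the available collision energy ECC is the relative
//translational energy plus the mode energy, a trial post-collision level is accepted with
//probability (1-EVIB/ECC)^(3/2-omega) as in eqn (3.28), and the remainder ECT is passed on)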
ECT=0.5e00*gas->RMAS*VRR;
for(NSP=1;NSP<=2;NSP++)
{
//consider the molecules in turn
if(NSP==1)
{
K=L;
}
else
{
K=M;
}
if(gas->MMVM>0)
{
if(gas->ISPV[1]>0)
{
for(KV=1;KV<=gas->ISPV[1];KV++)
{
EVIB=(double)(molecs->IPVIB[KV][K]*BOLTZ*gas->SPVM[1][KV][1]);
ECC=ECT+EVIB;
if(gas->SPVM[3][KV][1]>0.0)
{
MAXLEV=ECC/(BOLTZ*gas->SPVM[1][KV][1]);
B=gas->SPVM[4][KV][1]/gas->SPVM[3][KV][1]; //Tdiss/Tref
A= gas->SPVM[4][KV][1]/output->VAR[8][NN] ;//Tdiss/Ttrans
//ZV=(A**SPM(3,1,1))*(SPVM(3,KV,1)*(B**(-SPM(3,1,1))))**(((A**0.3333333D00)-1.D00)/((B**0.33333D00)-1.D00))
ZV=pow(A,gas->SPM[3][1][1])*pow(gas->SPVM[3][KV][1]*pow(B,-gas->SPM[3][1][1]),((pow(A,0.3333333e00)-1.e00)/(pow(B,0.33333e00)-1.e00)));
}
else
{
ZV=gas->SPVM[2][KV][1];
MAXLEV=ECC/(BOLTZ*gas->SPVM[1][KV][1])+1;
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(1.e00/ZV>calc->RANF)
{
II=0;
while(II==0)
{
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
IV=calc->RANF*(MAXLEV+0.99999999e00);
molecs->IPVIB[KV][K]=IV;
EVIB=(double)(IV)*BOLTZ*gas->SPVM[1][KV][1];
if(EVIB<ECC)
{
PROB=pow((1.e00-EVIB/ECC),(1.5e00-gas->SPM[3][1][1]));
//PROB is the probability ratio of eqn (3.28)
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(PROB>calc->RANF)
II=1;
}
}
ECT=ECC-EVIB;
}
}
}
}
//now rotation of this molecule
//dout
if(gas->ISPR[1][1] > 0)
{
if(gas->ISPR[2][1]==0)
{
B=1.e00/gas->SPR[1][1];
}
else //use molecule rather than mean value
{
B=1.e00/(gas->SPR[1][1]+gas->SPR[2][1]*output->VAR[8][NN]+gas->SPR[3][1]*pow(output->VAR[8][NN],2));
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(B>calc->RANF)
{
ECC=ECT +molecs->PROT[K];
if(gas->ISPR[1][1]==2)
{
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
ERM=1.e00-pow(calc->RANF,1.e00/(2.5e00-gas->SP[3][1])); //eqn(5.46)
}
else
{
//dout
LBS(0.5e00*gas->ISPR[1][1]-1.e00,1.5e00-gas->SP[3][1],ERM);
}
molecs->PROT[K]=ERM*ECC;
ECT=ECC-molecs->PROT[K];
}
}
}
//adjust VR for the change in energy;
VR=sqrt(2.e00*ECT/gas->SPM[1][1][1]);
}
//end of L-B redistribution
for(KK=1;KK<=3;KK++)
{
VCM[KK]=0.5e00*(molecs->PV[KK][L]+molecs->PV[KK][M]);
}
//dout
if(fabs(gas->SP[4][1]-1.0) < 0.001)
{
//use the VHS logic //dout
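//(VHS scattering is isotropic: B is a cosine uniform on [-1,1], C a uniform azimuth on
//[0,2*PI), and the post-collision relative velocity is rebuilt with the adjusted magnitude VR)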
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
B=2.e00*calc->RANF-1.e00;
//B is the cosine of a random elevation angle
A=sqrt(1.e00-B*B);
VRCP[1]=B*VR;
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
C=2.e00*PI*calc->RANF;
//C is a random azimuth angle
//dout
VRCP[2]=A*cos(C)*VR;
VRCP[3]=A*sin(C)*VR;
}
else
{
//use the VSS logic //dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
B=2.e00*(pow(calc->RANF,gas->SP[4][1]))-1.e00;
//B is the cosine of the deflection angle for the VSS model (Eqn. 11.8) of Bird(1994))
A=sqrt(1.e00-B*B);
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
C=2.e00*PI*calc->RANF;
//dout
OC=(double)cos(C);
SD=(double)sin(C);
D=sqrt(pow(VRC[2],2)+pow(VRC[3],2));
VRA=VR/VRI;
VRCP[1]=(B*VRC[1]+A*SD*D)*VRA;
VRCP[2]=(B*VRC[2]+A*(VRI*VRC[3]*OC-VRC[1]*VRC[2]*SD)/D)*VRA;
VRCP[3]=(B*VRC[3]+A*(VRI*VRC[2]*OC-VRC[1]*VRC[3]*SD)/D)*VRA;
//the post-collision rel. velocity components are based on eqn (3.18)
}
for(KK=1;KK<=3;KK++)
{
molecs->PV[KK][L]=VCM[KK]+0.5e00*VRCP[KK];
molecs->PV[KK][M]=VCM[KK]-0.5e00*VRCP[KK];
}
molecs->IPCP[L]=M;
molecs->IPCP[M]=L;
}
} //collision occurrence
else
{
//Gas Mixture
LS=fabs(molecs->IPSP[L]);
MS=fabs(molecs->IPSP[M]);
CVR=VR*gas->SPM[2][LS][MS]*pow(((2.e00*BOLTZ*gas->SPM[5][LS][MS])/((gas->SPM[1][LS][MS])*VRR)),(gas->SPM[3][LS][MS]-0.5e00))*gas->SPM[6][LS][MS];
if(CVR>geom->CCELL[4][N])
{
geom->CCELL[4][N]=CVR;
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF<CVR/geom->CCELL[4][N] && molecs->IPCELL[L]>0 && molecs->IPCELL[M]>0)
{
//the collision occurs (-ve IPCELL indicates recombined molecule marked for removal)
if(M==molecs->IPCP[L] && L==molecs->IPCP[M])
{
file_9<<"Duplicate collision";
}
calc->TOTCOL=calc->TOTCOL+1.e00;
calc->TCOL[LS][MS]=calc->TCOL[LS][MS]+1.e00;
calc->TCOL[MS][LS]=calc->TCOL[MS][LS]+1.e00;
output->COLLS[NN]=output->COLLS[NN]+1.e00;
output->WCOLLS[NN]=output->WCOLLS[NN]+WFC;
SEP=fabs(molecs->PX[1][L]-molecs->PX[1][M]);
output->CLSEP[NN]=output->CLSEP[NN]+SEP;
RML=gas->SPM[1][LS][MS]/gas->SP[5][MS];
RMM=gas->SPM[1][LS][MS]/gas->SP[5][LS];
for(KK=1;KK<=3;KK++)
{
VCM[KK]=RML*molecs->PV[KK][L]+RMM*molecs->PV[KK][M];
}
IDISS=0;
IREC=0;
IEX=0;
//check for dissociation
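//(a molecule dissociates if the collision energy allows a vibrational level above the
//dissociation limit, i.e. MAXLEV > LIMLEV; the event is accepted with the reduction factor
//SPVM[5][KV][KS] and the molecule is marked for removal by a negative IPVIB)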
if(gas->ISPR[1][LS]>0 || gas->ISPR[1][MS]>0)
{
ECT=0.5e00*gas->SPM[1][LS][MS]*VRR;
for(NSP=1;NSP<=2;NSP++)
{
if(NSP==1)
{
K=L; KS=LS; JS=MS;
}
else
{
K=M ; KS=MS ; JS=LS;
}
if(gas->MMVM>0)
{
if(gas->ISPV[KS]>0)
{
for(KV=1;KV<=gas->ISPV[KS];KV++)
{
if(molecs->IPVIB[KV][K]>=0 && IDISS==0)
{
//do not redistribute to a dissociating molecule marked for removal
EVIB=(double)(molecs->IPVIB[KV][K]*BOLTZ*gas->SPVM[1][KV][KS]);
ECC=ECT+EVIB;
MAXLEV=ECC/(BOLTZ*gas->SPVM[1][KV][KS]);
LIMLEV=gas->SPVM[4][KV][KS]/gas->SPVM[1][KV][KS];
if(MAXLEV > LIMLEV)
{
//dissociation occurs subject to reduction factor - reflects the infinity of levels past the dissociation limit
//dout
// RANDOM_NUMBER(RANF)
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF<gas->SPVM[5][KV][KS])
{
IDISS=1;
LZ=molecs->IPVIB[KV][K];
output->NDISSL[LZ]=output->NDISSL[LZ]+1;
ECT=ECT-BOLTZ*gas->SPVM[4][KV][KS]+EVIB;
//adjust VR for the change in energy
VRR=2.e00*ECT/gas->SPM[1][LS][MS];
VR=sqrt(VRR);
molecs->IPVIB[KV][K]=-1;
//a negative IPVIB marks a molecule for dissociation
}
}
}
}
}
}
}
}
IEX=0; //becomes the reaction number if a reaction occurs
IREC=0; //becomes 1 if a recombination occurs
if(IDISS==0)
{
//dissociation has not occurred
//consider possible recombinations
if(gas->ISPRC[LS][MS]>0 && geom->ICCELL[2][N]>2)
{
//possible recombination using model based on collision volume for equilibrium
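//(a third-body molecule KT is picked at random from the same collision cell; the
//recombination is accepted with probability B = BB*N_cell*FNUM/V_cell, where BB is the
//collision volume obtained by scaling the reference volume AA with SPRC and a
//temperature-dependent factor)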
KT=L;
//NTRY=0
while(KT==L||KT==M)
{
NTRY+=1;
// if(NTRY>100)
// {
// cout>>"NTRY 3rd body"<<NTRY;
// }
//RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
K=(int)(calc->RANF*(double)(geom->ICCELL[2][N]))+geom->ICCELL[1][N]+1;
KT=molecs->ICREF[K];
}
KS=molecs->IPSP[KT];
//the potential third body is KT OF species KS
AA=(PI/6.e00)*pow((gas->SP[1][LS]+gas->SP[1][MS]+gas->SP[1][KS]),3); //reference volume
BB=AA*gas->SPRC[1][LS][MS][KS]*pow(output->VAR[8][NN]/gas->SPVM[1][gas->ISPRK[LS][MS]][gas->ISPRC[LS][MS]],gas->SPRC[2][LS][MS][KS]);//collision volume
B=BB*geom->ICCELL[2][N]*calc->FNUM/AAA;
if(B>1.e00)
{
cout<<"THREE BODY PROBABILITY"<<B;
//for low density flows in which three-body collisions are very rare, it is advisable to consider recombinations in only a small
//fraction of collisions and to increase the probability by the inverse of this fraction. This message provides a warning if this
//factor has been set to an excessively large value
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(calc->RANF<B)
{
IREC=1;
calc->TRECOMB[gas->ISPRC[LS][MS]]=calc->TRECOMB[gas->ISPRC[LS][MS]]+1.e00;
//the collision now becomes a collision between these with L having the center of mass velocity
A=0.5e00*gas->SPM[1][LS][MS]*VRR ;//the relative energy of the recombining molecules
if(gas->ISPR[1][LS]>0)
A=A+molecs->PROT[L];
if(gas->MELE>1)
A=A+molecs->PELE[L];
if(gas->ISPV[LS]>0)
{
for(KVV=1;KVV<=gas->ISPV[LS];KVV++)
{
JI=molecs->IPVIB[KVV][L];
if(JI<0)
JI=-JI;
if(JI==99999)
JI=0;
A=A+(double)(JI)*BOLTZ*gas->SPVM[1][KVV][LS];
}
}
if(gas->ISPR[1][MS]>0)
A+=molecs->PROT[M];
if(gas->MELE>1)
A=A+molecs->PELE[M];
if(gas->ISPV[MS]>0)
{
for(KVV=1;KVV<=gas->ISPV[MS];KVV++)
{
JI=molecs->IPVIB[KVV][M];
if(JI<0)
JI=-JI;
if(JI==99999)
JI=0;
A=A+(double)(JI)*BOLTZ*gas->SPVM[1][KVV][MS];
}
}
gas->TREACL[2][LS]=gas->TREACL[2][LS]-1;
gas->TREACL[2][MS]=gas->TREACL[2][MS]-1;
LSI=LS;
MSI=MS;
LS=gas->ISPRC[LS][MS];
molecs->IPSP[L]=LS;
//any additional vibrational modes must be set to zero
IVM=gas->ISPV[LSI];
NMC=molecs->IPSP[L];
NVM=gas->ISPV[NMC];
if(NVM>IVM)
{
for(KV=IVM+1;KV<=NVM;KV++)
{
molecs->IPVIB[KV][L]=0;
}
}
if(gas->MELE>1)
molecs->PELE[L]=0.e00;
molecs->IPCELL[M]=-molecs->IPCELL[M]; //recombining molecule M marked for removal
M=KT; //third body molecule is set as molecule M
MS=KS;
gas->TREACG[2][LS]=gas->TREACG[2][LS]+1;
if(gas->ISPR[1][LS]>0)
{
molecs->PROT[L]=0.e00;
}
if(gas->MELE>1)
molecs->PELE[L]=0.e00;
if(gas->ISPV[LS]>0)
{
for(KVV=1;KVV<=gas->ISPV[LS];KVV++)
{
if(molecs->IPVIB[KVV][L]<0)
{
molecs->IPVIB[KVV][L]=-99999;
}
else
{
molecs->IPVIB[KVV][L]=0;
}
}
}
if(gas->ISPR[1][MS]>0)
{
molecs->PROT[M]=molecs->PROT[KT];
}
if(gas->MELE>1)
molecs->PELE[M]=molecs->PELE[KT];
if(gas->ISPV[MS]>0)
{
for(KVV=1;KVV<=gas->ISPV[MS];KVV++)
{
molecs->IPVIB[KVV][M]=molecs->IPVIB[KVV][KT];
}
}
ECTOT=A+gas->SPVM[4][1][LS]*BOLTZ ; //the energy added to this collision
for(KK=1;KK<=3;KK++)
{
molecs->PV[KK][L]=VCM[KK];
}
for(KK=1;KK<=3;KK++)
{
VRC[KK]=molecs->PV[KK][L]-molecs->PV[KK][M];
}
VRR=VRC[1]*VRC[1]+VRC[2]*VRC[2]+VRC[3]*VRC[3];
ECT=0.5e00*gas->SPM[1][LS][MS]*VRR+ECTOT;
//set the vibrational energy of the recombined molecule L to enforce detailed balance
IK=-1;
NK=-1;
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
//NTRY=0;
while(IK<0)
{
// NTRY+=1;
// if(NTRY>100)
// cout<<"NTRY VibEn"<<NTRY;
NK=NK+1;
BB=(output->VAR[8][NN]-gas->SPRT[1][LSI][MSI])*(gas->SPRP[2][LSI][MSI][NK]-gas->SPRP[1][LSI][MSI][NK])/(gas->SPRT[2][LSI][MSI]-gas->SPRT[1][LSI][MSI])+gas->SPRP[1][LSI][MSI][NK];
if(calc->RANF<BB)
IK=NK;
}
molecs->IPVIB[1][L]=IK;
ECT=ECT-(double)(IK)*BOLTZ*gas->SPVM[1][gas->ISPRK[LSI][MSI]][LS];
VRR=2.e00*ECT/gas->SPM[1][LS][MS];
VR=sqrt(VRR);
RML=gas->SPM[1][LS][MS]/gas->SP[5][MS];
RMM=gas->SPM[1][LS][MS]/gas->SP[5][LS];
for(KK=1;KK<=3;KK++)
{
VCM[KK]=RML*molecs->PV[KK][L]+RMM*molecs->PV[KK][M];
}
}
}
//consider exchange and chain reactions
if(gas->NSPEX[LS][MS]>0 && IREC==0 && IDISS==0)
{
//possible exchange reaction
//memset(gas->PSF,0.e00,sizeof(*gas->PSF));//gas->PSF=0.e00; //PSF(MMEX) PSF is the probability that this reaction will occur in this collision
for(int i=0;i<gas->MMEX+1;i++)
gas->PSF[i]=0.e00;
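//(PSF[JJ] holds the probability that exchange/chain reaction JJ occurs in this collision:
//for a reverse exothermic reaction it follows from the rate data SPEX[1],[2],[6], while for
//a forward endothermic reaction it is the fraction of vibrational levels above the
//activation energy, normalised by the level sum DEN)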
for(JJ=1;JJ<=gas->NSPEX[LS][MS];JJ++)
{
if(LS==gas->ISPEX[JJ][1][LS][MS])
{
K=L; KS=LS;JS=MS;
}
else
{
K=M; KS=MS; JS=LS;
}
//the pre-collision molecule that splits is K of species KS
if(gas->SPEX[3][JJ][LS][MS]<0.e00)
KV=gas->ISPEX[JJ][5][LS][MS];
if(gas->SPEX[3][JJ][LS][MS]>0.e00)
{
KV=gas->ISPEX[JJ][7][LS][MS];
}
JI=molecs->IPVIB[KV][K];
if(JI<0)
JI=-JI;
if(JI==99999)
JI=0;
ECC=0.5e00*gas->SPM[1][LS][MS]*VRR+(double)(JI)*BOLTZ*gas->SPVM[1][KV][KS];
if(gas->SPEX[3][JJ][KS][JS]>0.e00)
{
//reverse exothermic reaction
gas->PSF[JJ]=(gas->SPEX[1][JJ][KS][JS]*pow(output->VAR[8][NN]/273.e00,gas->SPEX[2][JJ][KS][JS]))*exp(-gas->SPEX[6][JJ][KS][JS]/(BOLTZ*output->VAR[8][NN]));
}
else
{
//forward endothermic reaction
MAXLEV=ECC/(BOLTZ*gas->SPVM[1][KV][KS]);
EA=fabs(gas->SPEX[3][JJ][KS][JS]); //temporarily just the heat of reaction;
if(ECC>EA)
{
//the collision energy must exceed the heat of reaction
EA=EA+gas->SPEX[6][JJ][KS][JS]; //the activation energy now includes the energy barrier
DEN=0.e00;
for(IAX=0;IAX<=MAXLEV;IAX++)
{
DEN=DEN+pow((1.e00-(double)(IAX)*BOLTZ*gas->SPVM[1][KV][KS]/ECC),(1.5e00-gas->SPM[3][KS][JS]));
}
gas->PSF[JJ]=(double)(gas->ISPEX[JJ][6][LS][MS])*pow((1.e00-EA/ECC),(1.5e00-gas->SPM[3][KS][JS]))/DEN;
}
}
}
if(gas->NSPEX[LS][MS]>1)
{
BB=0.e00;
for(JJ=1;JJ<=gas->NSPEX[LS][MS];JJ++)
{
BB=BB+gas->PSF[JJ];
}
//BB is the sum of the probabilities
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(BB>calc->RANF)
{
BB=0.e00;
IEX=0;
JJ=0;
//NTRY=0;
while(JJ<gas->NSPEX[LS][MS]&& IEX==0)
{
// NTRY=NTRY+1;
// if(NTRY>100)
// {
// cout<<"NTRY find IEX"<<NTRY;
// }
JJ+=1;
BB+=gas->PSF[JJ];
if(BB>calc->RANF)
IEX=JJ;
}
}
}
else
{
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
IEX=0;
if(gas->PSF[1]>calc->RANF)
IEX=1;
}
if(IEX>0)
{
//exchange or chain reaction occurs
JX=gas->NEX[IEX][LS][MS];
//cout<<"Reaction"<<JX;
gas->TNEX[JX]=gas->TNEX[JX]+1.e00;
//cout<<IEX<<L<<M<<LS<<MS;
molecs->IPSP[L]=gas->ISPEX[IEX][3][LS][MS]; //L is now the new molecule that splits
molecs->IPSP[M]=gas->ISPEX[IEX][4][LS][MS];
LSI=LS;
MSI=MS;
//any additional vibrational modes must be set to zero
IVM=gas->ISPV[LS];
NMC=molecs->IPCP[L];
NVM=gas->ISPV[NMC];
if(NVM>IVM)
{
for(KV=IVM+1;KV<=NVM;KV++)
{
molecs->IPVIB[KV][L]=0;
}
}
IVM=gas->ISPV[MS];
NMC=molecs->IPCP[M];
NVM=gas->ISPV[NMC];
if(NVM>IVM)
{
for(KV=IVM+1;KV<=NVM;KV++)
{
molecs->IPVIB[KV][M]=0;
}
}
//put all pre-collision energies into the relative translational energy and adjust for the reaction energy
ECT=0.5e00*gas->SPM[1][LS][MS]*VRR;
if(gas->ISPR[1][LS]>0)
ECT=ECT+molecs->PROT[L];
if(gas->MELE>1)
ECT=ECT+molecs->PELE[L];
if(gas->ISPV[LS]>0)
{
for(KV=1;KV<=gas->ISPV[LS];KV++)
{
JI=molecs->IPVIB[KV][L];
if(JI<0)
JI=-JI;
if(JI==99999)
JI=0;
ECT=ECT+(double)(JI)*BOLTZ*gas->SPVM[1][KV][LS];
}
}
if(gas->ISPR[1][MS]>0)
ECT=ECT+molecs->PROT[M];
if(gas->MELE>1)
ECT=ECT+molecs->PELE[M];
if(gas->ISPV[MS]>0)
{
for(KV=1;KV<=gas->ISPV[MS];KV++)
{
JI=molecs->IPVIB[KV][M];
if(JI<0)
JI=-JI;
if(JI==99999)
JI=0;
ECT=ECT+(double)(JI)*BOLTZ*gas->SPVM[1][KV][MS];
}
}
ECT=ECT+gas->SPEX[3][IEX][LS][MS];
if(ECT<0.0)
{
cout<<"-VE ECT "<<ECT<<endl;
cout<<"REACTION "<<JJ<<" BETWEEN "<<LS<<" "<<MS<<endl;
//dout
cin.get();
return ;
}
if(gas->SPEX[3][IEX][LS][MS]<0.e00)
{
gas->TREACL[3][LS]=gas->TREACL[3][LS]-1;
gas->TREACL[3][MS]=gas->TREACL[3][MS]-1;
LS=molecs->IPSP[L] ;
MS=molecs->IPSP[M] ;
gas->TREACG[3][LS]=gas->TREACG[3][LS]+1;
gas->TREACG[3][MS]=gas->TREACG[3][MS]+1;
}
else
{
gas->TREACL[4][LS]=gas->TREACL[4][LS]-1;
gas->TREACL[4][MS]=gas->TREACL[4][MS]-1;
LS=molecs->IPSP[L] ;
MS=molecs->IPSP[M] ;
gas->TREACG[4][LS]=gas->TREACG[4][LS]+1;
gas->TREACG[4][MS]=gas->TREACG[4][MS]+1;
}
RML=gas->SPM[1][LS][MS]/gas->SP[5][MS];
RMM=gas->SPM[1][LS][MS]/gas->SP[5][LS];
//calculate the new VRR to match ECT using the new molecular masses
VRR=2.e00*ECT/gas->SPM[1][LS][MS];
if(gas->ISPV[LS]>0)
{
for(KV=1;KV<=gas->ISPV[LS];KV++)
{
if(molecs->IPVIB[KV][L]<0)
{
molecs->IPVIB[KV][L]=-99999;
}
else
{
molecs->IPVIB[KV][L]=0;
}
}
}
if(gas->ISPR[1][LS]>0)
molecs->PROT[L]=0;
if(gas->MELE>1)
molecs->PELE[L]=0.e00;
if(gas->ISPV[MS]>0)
{
for(KV=1;KV<=gas->ISPV[MS];KV++)
{
if(molecs->IPVIB[KV][M]<0)
{
molecs->IPVIB[KV][M]=-99999;
}
else
{
molecs->IPVIB[KV][M]=0;
}
}
}
if(gas->ISPR[1][MS]>0)
molecs->PROT[M]=0;
if(gas->MELE>1)
molecs->PELE[M]=0.e00;
//set vibrational level of product molecule in exothermic reaction to enforce detailed balance
if(gas->SPEX[3][IEX][LSI][MSI]>0.e00)
{
//exothermic exchange or chain reaction
IK=-1; //becomes 0 when the level is chosen
NK=-1;
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
//NTRY=0;
while(IK<0)
{
// NTRY=NTRY+1;
// if(NTRY>100)
// {
// cout>>"NTRY VibProd"<<NTRY<<endl;
// }
NK=NK+1;
BB=(output->VAR[8][NN]-gas->SPEX[4][IEX][LSI][MSI])* (gas->SPREX[2][IEX][LSI][MSI][NK]-gas->SPREX[1][IEX][LSI][MSI][NK])/(gas->SPEX[5][IEX][LSI][MSI]-gas->SPEX[4][IEX][LSI][MSI])+gas->SPREX[1][IEX][LSI][MSI][NK];
if(calc->RANF<BB)
IK=NK;
}
if(gas->NSLEV[1][LS]>0)
{
IK+=gas->NSLEV[1][LS];
gas->NSLEV[1][LS]=0;
}
KV=gas->ISPEX[IEX][7][LSI][MSI];
molecs->IPVIB[KV][L]=IK;
EVIB=(double)(IK)*BOLTZ*gas->SPVM[1][KV][LS];
ECT=ECT-EVIB;
if(ECT<0.e00)
{
//NTRY=0;
while(ECT<0.e00)
{
//NTRY+=1;
// if(NTRY>100)
// cout<<"NTRY ECT<0"<<NTRY<<endl;
molecs->IPVIB[KV][L]=molecs->IPVIB[KV][L]-1;
gas->NSLEV[1][LS]+=1;
ECT=ECT+BOLTZ*gas->SPVM[1][KV][LS];
}
}
}
else
{
//for endothermic reaction, select vibration from vib. dist. at macroscopic temperature
//normal L-B selection would be from the excessively low energy after the endo. reaction
KV=gas->ISPEX[IEX][5][LS][MS];
//dout
SVIB(LS,output->VAR[8][NN],IK,KV);
if(gas->NSLEV[2][LS]>0)
{
IK=IK+gas->NSLEV[2][LS];
gas->NSLEV[2][LS]=0;
}
molecs->IPVIB[KV][L]=IK;
EVIB=(double)(IK)*BOLTZ*gas->SPVM[1][KV][LS];
ECT=ECT-EVIB;
if(ECT<0.e00)
{
//NTRY=0;
while(ECT<0.e00)
{
//NTRY+=1;
molecs->IPVIB[KV][L]-=1;
gas->NSLEV[2][LS]+=1;
ECT=ECT+BOLTZ*gas->SPVM[1][KV][LS];
// if(NTRY>100)
// {
//cout<<"NTRY ECT<0#2"<<NTRY<<endl;
// molecs->IPVIB[KV][L]=0;
// ECT+=EVIB;
// gas->NSLEV[2][LS]=0;
// }
}
}
}
//set rotational energy of molecule L to equilibrium at the macroscopic temperature
SROT(LS,output->VAR[8][NN],molecs->PROT[L]);
if(gas->SLER[LS]>1.e-21)
{
molecs->PROT[L]+=gas->SLER[LS];
gas->SLER[LS]=1.e-21;
}
ECT-=molecs->PROT[L];
ABA=molecs->PROT[L];
if(ECT<0.e00)
{
//NTRY=0;
while(ECT<0.e00)
{
//NTRY+=1;
BB=0.5e00*molecs->PROT[L];
gas->SLER[LS]+=BB;
molecs->PROT[L]=BB;
ECT+=BB;
// if(NTRY>100)
// {
// cout<<"NTRY ECT<0#3"<<NTRY<<L<<endl;
// ECT+=ABA;
// molecs->PROT[L]=0;
// gas->SLER[LS]=1.e-21;
// }
}
}
//calculate the new VRR to match ECT using the new molecular masses
VRR=2.e00*ECT/gas->SPM[1][LS][MS];
}
}
}
//end of reactions other than the deferred dissociation action in the DISSOCIATION subroutine
if(IREC==0 && IDISS==0)
{
//recombined redistribution already made and there is a separate subroutine for dissociation
//Larsen-Borgnakke serial redistribution
ECT=0.5e00*gas->SPM[1][LS][MS]*VRR;
for(NSP=1;NSP<=2;NSP++)
{
if(NSP==1)
{
K=L;KS=LS;JS=MS;
}
else
{
K=M; KS=MS; JS=LS;
}
//now electronic energy for this molecule
if(gas->MELE>1)
{
B=1.e00/gas->QELC[3][1][KS];
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(B>calc->RANF)
{
NPS=0;
ECC=ECT+molecs->PELE[K];
if(gas->NELL[KS]==1){
NPS=gas->QELC[1][1][KS]; //number of possible states is at least the degeneracy of the ground state
}
if(gas->NELL[KS]>1)
{
for(NEL=1;NEL<=gas->NELL[KS];NEL++)
{
if(ECC>BOLTZ*gas->QELC[2][NEL][KS])
NPS=NPS+gas->QELC[1][NEL][KS];
}
II=0;
//NTRY=0;
while(II==0)
{
//NTRY+=1;
// if(NTRY>100)
// cout<<"NTRY ElecEn"<<NTRY<<endl;
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
NSTATE=ceil(calc->RANF*NPS);//random state, now determine the energy level
NAS=0;
NLEVEL=-1;
for(NEL=1;NEL<=gas->NELL[KS];NEL++)
{
NAS= NAS+gas->QELC[1][NEL][KS];
if(NSTATE<=NAS && NLEVEL<0)
NLEVEL=NEL;
}
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if((1.e00/(B*gas->QELC[3][NLEVEL][KS]))<calc->RANF)
{
II=1;
}
else
{
if(ECC>BOLTZ*gas->QELC[2][NLEVEL][KS])
{
PROB=pow(1.e00-BOLTZ*gas->QELC[2][NLEVEL][KS]/ECC,(1.5e00-gas->SPM[3][KS][JS]));
//dout
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(PROB>calc->RANF)
{
II=1;
molecs->PELE[K]=BOLTZ*gas->QELC[2][NLEVEL][KS];
}
}
}
}
ECT=ECC-molecs->PELE[K];
}
}
}
//now the vibrational energy for this molecule
if(gas->MMVM>0 && IEX==0)
{
if(gas->ISPV[KS]>0)
{
for(KV=1;KV<=gas->ISPV[KS];KV++)
{
if(molecs->IPVIB[KV][K]>=0 && IDISS==0) //do not redistribute to a dissociating molecule marked for removal
{
EVIB=(double)(molecs->IPVIB[KV][K])*BOLTZ*gas->SPVM[1][KV][KS];
ECC=ECT+EVIB;
MAXLEV=ECC/(BOLTZ*gas->SPVM[1][KV][KS]);
if(gas->SPVM[3][KV][KS]>0.0)
{
B=gas->SPVM[4][KV][KS]/gas->SPVM[3][KV][KS];
A=gas->SPVM[4][KV][KS]/output->VAR[8][NN];
ZV = pow(A,gas->SPM[3][KS][JS])*pow((gas->SPVM[2][KV][KS]*pow(B,-gas->SPM[3][KS][JS])),((pow(A,0.3333333e00)-1.e00)/(pow(B,0.33333e00)-1.e00)));
}
else
ZV=gas->SPVM[2][KV][KS];
// RANDOM_NUMBER(RANF) //dout
calc->RANF=((double)rand()/(double)RAND_MAX);
if(1.e00/ZV>calc->RANF ||IREC==1)
{
II=0;
NSTEP=0;
while(II==0 && NSTEP<100000)
{
NSTEP+=1;
if(NSTEP>99000)
{
cout<<NSTEP<<" "<<ECC<<" "<<MAXLEV<<endl;
//dout
return ;
}
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
IV=calc->RANF*(MAXLEV+0.99999999e00);
molecs->IPVIB[KV][K]=IV;
EVIB=(double)(IV)*BOLTZ*gas->SPVM[1][KV][KS];
if(EVIB<ECC)
{
PROB=pow(1.e00-EVIB/ECC,1.5e00-gas->SPM[3][KS][JS]);
//PROB is the probability ratio of eqn (3.28)
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(PROB>calc->RANF)
II=1;
}
}
ECT=ECC-EVIB;
}
}
}
}
}
//now rotation of this molecule
//dout
if(gas->ISPR[1][KS] > 0)
{
if(gas->ISPR[2][KS]==0 && gas->ISPR[2][JS]==0)
{
B=1.e00/gas->SPM[7][KS][JS];
}
else
B=1.e00/(gas->SPR[1][KS]+gas->SPR[2][KS]*output->VAR[8][NN]+gas->SPR[3][KS]*pow(output->VAR[8][NN],2));
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
if(B>calc->RANF|| IREC==1)
{
ECC=ECT+molecs->PROT[K];
if(gas->ISPR[1][KS]==2)
{
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
ERM=1.e00-pow(calc->RANF,(1.e00/(2.5e00-gas->SPM[3][KS][JS])));//eqn(5.46)
}
else
LBS(0.5e00*gas->ISPR[1][KS]-1.e00,1.5e00-gas->SPM[3][KS][JS],ERM);
molecs->PROT[K]=ERM*ECC;
ECT=ECC-molecs->PROT[K];
}
}
}
//adjust VR for the change in energy
VR=sqrt(2.e00*ECT/gas->SPM[1][LS][MS]);
}//end of L-B redistribution
if(fabs(gas->SPM[8][LS][MS]-1.0)<0.001)
{
//use the VHS logic
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
B=2.e00*calc->RANF-1.e00;
//B is the cosine of a random elevation angle
A=sqrt(1.e00-B*B);
VRCP[1]=B*VR;
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
C=2.e00*PI*calc->RANF;
//C is a random azimuth angle;
VRCP[2]=A*(double)cos(C)*VR;
VRCP[3]=A*(double)sin(C)*VR;
}
else
{
//use the VSS logic
//the VRCP terms do not allow properly for the change in VR - see new book !STILL TO BE FIXED
VRA=VR/VRI;
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
B=2.e00*pow(calc->RANF,gas->SP[4][1])-1.e00;
// B is the cosine of the deflection angle for the VSS model
A=sqrt(1.e00-B*B);
// RANDOM_NUMBER(RANF);
calc->RANF=((double)rand()/(double)RAND_MAX);
C=2.e00*PI*calc->RANF;
OC=(double)cos(C);
SD=(double)sin(C);
D=sqrt(pow(VRC[2],2)+pow(VRC[3],2));
VRCP[1]=(B*VRC[1]+A*SD*D)*VRA;
VRCP[2]=(B*VRC[2]+A*(VRI*VRC[3]*OC-VRC[1]*VRC[2]*SD)/D)*VRA;
VRCP[3]=(B*VRC[3]+A*(VRI*VRC[2]*OC+VRC[1]*VRC[3]*SD)/D)*VRA;
//the post-collision rel. velocity components are based on eqn (3.18)
}
for(KK=1;KK<=3;KK++)
{
molecs->PV[KK][L]=VCM[KK]+RMM*VRCP[KK];
molecs->PV[KK][M]=VCM[KK]-RMM*VRCP[KK];
}
molecs->IPCP[L]=M;
molecs->IPCP[M]=L;
//call energy(0,E2)
// ! IF (Dfabs(E2-E1) > 1.D-14) read(*,*)
}////collision occurrence
}
}//separate simplegas / mixture coding
}
}
}
}
}//remove any recombined atoms
duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC;
std::cout<<"printf: "<< duration <<'\n';
for(N=1;N<=molecs->NM;N++)
{
if(molecs->IPCELL[N]<0)
REMOVE_MOL(N);
}
return;
}
|
d97f507ba67d40ba119cd4ae49f2d55e7685819c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cpu_bitmap.h>
#define INF 2e10f
#define rnd(x)(x*rand()/RAND_MAX)
#define SPHERES 20
#define DIM 1024
struct Sphere {
float r, b, g;
float radius;
float x, y, z;
__device__ float hit(float ox, float oy, float *n) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz / sqrtf(radius*radius);
return dz + z;
}
return -INF;
}
};
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
__global__ void kernel(Sphere *s,unsigned char *ptr) {
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y*blockDim.x*gridDim.x;
float ox = (x - DIM / 2);
float oy = (y - DIM / 2);
float r = 0, g = 0, b = 0;
float maxz = -INF;
for (int i = 0; i < SPHERES; i++) {
float n;
float t = s[i].hit(ox, oy, &n);
if (t > maxz) {
float fscale = n;
r = s[i].r*fscale;
g = s[i].g*fscale;
b = s[i].b*fscale;
maxz = t; // track the depth of the nearest hit so closer spheres are not overwritten by farther ones
}
}
ptr[offset * 4 + 0] = (int)(r * 255);
ptr[offset * 4 + 1] = (int)(g * 255);
ptr[offset * 4 + 2] = (int)(b * 255);
ptr[offset * 4 + 3] = 255;
}
int main(void) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
DataBlock data;
CPUBitmap bitmap(DIM, DIM, &data);
unsigned char *dev_bitmap;
Sphere *s;
hipMalloc((void**)&dev_bitmap, bitmap.image_size());
hipMalloc((void**)&s, sizeof(Sphere)*SPHERES);
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere)*SPHERES);
for (int i = 0; i < SPHERES; i++) {
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
hipMemcpy(s, temp_s, sizeof(Sphere)*SPHERES, hipMemcpyHostToDevice);
free(temp_s);
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
kernel << <grids, threads >> > (s,dev_bitmap);
hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost);
bitmap.display_and_exit();
hipFree(dev_bitmap);
hipFree(s);
};
|
d97f507ba67d40ba119cd4ae49f2d55e7685819c.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cpu_bitmap.h>
#define INF 2e10f
#define rnd(x)(x*rand()/RAND_MAX)
#define SPHERES 20
#define DIM 1024
struct Sphere {
float r, b, g;
float radius;
float x, y, z;
__device__ float hit(float ox, float oy, float *n) {
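// rays are cast parallel to the z axis from pixel (ox, oy): on a hit, return the z depth of
// the intersection (used for the depth comparison in the kernel) and write the normalised
// z component of the surface normal to *n; otherwise return -INF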
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz / sqrtf(radius*radius);
return dz + z;
}
return -INF;
}
};
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
__global__ void kernel(Sphere *s,unsigned char *ptr) {
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y*blockDim.x*gridDim.x;
float ox = (x - DIM / 2);
float oy = (y - DIM / 2);
float r = 0, g = 0, b = 0;
float maxz = -INF;
for (int i = 0; i < SPHERES; i++) {
float n;
float t = s[i].hit(ox, oy, &n);
if (t > maxz) {
float fscale = n;
r = s[i].r*fscale;
g = s[i].g*fscale;
b = s[i].b*fscale;
maxz = t; // track the depth of the nearest hit so closer spheres are not overwritten by farther ones
}
}
ptr[offset * 4 + 0] = (int)(r * 255);
ptr[offset * 4 + 1] = (int)(g * 255);
ptr[offset * 4 + 2] = (int)(b * 255);
ptr[offset * 4 + 3] = 255;
}
int main(void) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
DataBlock data;
CPUBitmap bitmap(DIM, DIM, &data);
unsigned char *dev_bitmap;
Sphere *s;
cudaMalloc((void**)&dev_bitmap, bitmap.image_size());
cudaMalloc((void**)&s, sizeof(Sphere)*SPHERES);
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere)*SPHERES);
for (int i = 0; i < SPHERES; i++) {
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
cudaMemcpy(s, temp_s, sizeof(Sphere)*SPHERES, cudaMemcpyHostToDevice);
free(temp_s);
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
kernel << <grids, threads >> > (s,dev_bitmap);
cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost);
bitmap.display_and_exit();
cudaFree(dev_bitmap);
cudaFree(s);
};
|
aedadd91ef1ba9492c1f4b14ba785fce04fa70a8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Center assignments
Written by Jiageng Mao
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
__device__ float limit_period(float val, float offset, float period){
float rval = val - floor(val / period + offset) * period;
return rval;
}
__device__ float gaussian_radius(float height, float width, float min_overlap){
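// returns the largest Gaussian radius such that a box shifted by that amount still keeps
// IoU >= min_overlap with the (height, width) box; the three quadratics cover the usual
// CornerNet-style overlap cases and the smallest of the three roots is taken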
float a1 = 1;
float b1 = (height + width);
float c1 = width * height * (1 - min_overlap) / (1 + min_overlap);
float sq1 = sqrt(b1 * b1 - 4 * a1 * c1);
float r1 = (b1 + sq1) / 2;
float a2 = 4;
float b2 = 2 * (height + width);
float c2 = (1 - min_overlap) * width * height;
float sq2 = sqrt(b2 * b2 - 4 * a2 * c2);
float r2 = (b2 + sq2) / 2;
float a3 = 4 * min_overlap;
float b3 = -2 * min_overlap * (height + width);
float c3 = (min_overlap - 1) * width * height;
float sq3 = sqrt(b3 * b3 - 4 * a3 * c3);
float r3 = (b3 + sq3) / 2;
return min(min(r1, r2), r3);
}
__global__ void draw_center_kernel(int batch_size, int max_boxes, int max_objs, int num_cls, int H, int W, int code_size, int min_radius,
float voxel_x, float voxel_y, float range_x, float range_y, float out_factor, float gaussian_overlap,
const float *gt_boxes, float *heatmap, int *gt_ind, int *gt_mask, int *gt_cat, float *gt_box_encoding, int *gt_cnt){
/*
Args:
gt_boxes: (B, max_boxes, 8 or 10) with class labels
heatmap: (B, num_cls, H, W)
gt_ind: (B, num_cls, max_objs)
gt_mask: (B, num_cls, max_objs)
gt_cat: (B, num_cls, max_objs)
gt_box_encoding: (B, num_cls, max_objs, code_size) sin/cos
gt_cnt: (B, num_cls)
*/
int bs_idx = blockIdx.x;
int box_idx = threadIdx.x;
if (bs_idx >= batch_size || box_idx >= max_boxes) return;
// move pointer
gt_boxes += bs_idx * max_boxes * code_size;
heatmap += bs_idx * num_cls * H * W;
gt_ind += bs_idx * num_cls * max_objs;
gt_mask += bs_idx * num_cls * max_objs;
gt_cat += bs_idx * num_cls * max_objs;
gt_box_encoding += bs_idx * num_cls * max_objs * code_size;
gt_cnt += bs_idx * num_cls;
// gt box parameters
float x = gt_boxes[box_idx * code_size + 0];
float y = gt_boxes[box_idx * code_size + 1];
float z = gt_boxes[box_idx * code_size + 2];
float dx = gt_boxes[box_idx * code_size + 3];
float dy = gt_boxes[box_idx * code_size + 4];
float dz = gt_boxes[box_idx * code_size + 5];
// origin dx/dy/dz is for box_encodings
float origin_dx = gt_boxes[box_idx * code_size + 3];
float origin_dy = gt_boxes[box_idx * code_size + 4];
float origin_dz = gt_boxes[box_idx * code_size + 5];
float rot = gt_boxes[box_idx * code_size + 6];
float vel_x = 0;
float vel_y = 0;
float cls = 0;
if (code_size == 10) {
vel_x = gt_boxes[box_idx * code_size + 7];
vel_y = gt_boxes[box_idx * code_size + 8];
cls = gt_boxes[box_idx * code_size + 9];
} else if (code_size == 8) {
cls = gt_boxes[box_idx * code_size + 7];
} else {
return;
}
// box not defined
if (dx == 0 || dy == 0 || dz == 0) return;
// cls begin from 1
int cls_idx = (int) cls - 1;
heatmap += cls_idx * H * W;
gt_ind += cls_idx * max_objs;
gt_mask += cls_idx * max_objs;
gt_cat += cls_idx * max_objs;
gt_box_encoding += cls_idx * max_objs * code_size;
gt_cnt += cls_idx;
// transform to bev map coords
float offset = 0.5;
float period = 6.283185307179586;
rot = limit_period(rot, offset, period);
dx = dx / voxel_x / out_factor;
dy = dy / voxel_y / out_factor;
float radius = gaussian_radius(dy, dx, gaussian_overlap);
int radius_int = max(min_radius, (int) radius);
float coor_x = (x - range_x) / voxel_x / out_factor;
float coor_y = (y - range_y) / voxel_y / out_factor;
int coor_x_int = (int) coor_x;
int coor_y_int = (int) coor_y;
if (coor_x_int >= W || coor_x_int < 0) return;
if (coor_y_int >= H || coor_y_int < 0) return;
// draw gaussian map
float div_factor = 6.0;
float sigma = (2 * radius_int + 1) / div_factor;
for (int scan_y = -radius_int; scan_y < radius_int + 1; scan_y++){
if (coor_y_int + scan_y < 0 || coor_y_int + scan_y >= H) continue;
for (int scan_x = -radius_int; scan_x < radius_int + 1; scan_x++){
if (coor_x_int + scan_x < 0 || coor_x_int + scan_x >= W) continue;
float weight = exp(-(scan_x * scan_x + scan_y * scan_y) / (2 * sigma * sigma)); // force convert float sigma
float eps = 0.0000001;
if (weight < eps) weight = 0;
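// the pair of atomicExch calls below keeps the larger of the stored and the new weight,
// i.e. a workaround for the missing float atomicMax; note it is not fully race-free when
// several boxes write the same pixel concurrently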
float *w_addr = heatmap + (coor_y_int + scan_y) * W + (coor_x_int + scan_x);
float old_weight = atomicExch(w_addr, weight);
if (old_weight > weight) weight = atomicExch(w_addr, old_weight);
}
}
int obj_idx = atomicAdd(gt_cnt, 1);
if (obj_idx >= max_objs) return;
gt_ind[obj_idx] = coor_y_int * W + coor_x_int;
gt_mask[obj_idx] = 1;
gt_cat[obj_idx] = cls_idx + 1; // begin from 1
gt_box_encoding[obj_idx * code_size + 0] = coor_x - coor_x_int;
gt_box_encoding[obj_idx * code_size + 1] = coor_y - coor_y_int;
gt_box_encoding[obj_idx * code_size + 2] = z;
gt_box_encoding[obj_idx * code_size + 3] = origin_dx;
gt_box_encoding[obj_idx * code_size + 4] = origin_dy;
gt_box_encoding[obj_idx * code_size + 5] = origin_dz;
gt_box_encoding[obj_idx * code_size + 6] = sin(rot);
gt_box_encoding[obj_idx * code_size + 7] = cos(rot);
if (code_size == 10) {
gt_box_encoding[obj_idx * code_size + 8] = vel_x;
gt_box_encoding[obj_idx * code_size + 9] = vel_y;
}
return;
}
void draw_center_kernel_launcher(int batch_size, int max_boxes, int max_objs, int num_cls, int H, int W, int code_size, int min_radius,
float voxel_x, float voxel_y, float range_x, float range_y, float out_factor, float gaussian_overlap,
const float *gt_boxes, float *heatmap, int *gt_ind, int *gt_mask, int *gt_cat, float *gt_box_encoding, int *gt_cnt){
hipError_t err;
dim3 blocks(batch_size);
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( draw_center_kernel), dim3(blocks), dim3(threads), 0, 0, batch_size, max_boxes, max_objs, num_cls, H, W, code_size, min_radius,
voxel_x, voxel_y, range_x, range_y, out_factor, gaussian_overlap,
gt_boxes, heatmap, gt_ind, gt_mask, gt_cat, gt_box_encoding, gt_cnt);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
#ifdef DEBUG
hipDeviceSynchronize(); // for using printf in kernel function
#endif
}
|
aedadd91ef1ba9492c1f4b14ba785fce04fa70a8.cu
|
/*
Center assignments
Written by Jiageng Mao
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
__device__ float limit_period(float val, float offset, float period){
float rval = val - floor(val / period + offset) * period;
return rval;
}
__device__ float gaussian_radius(float height, float width, float min_overlap){
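// returns the largest Gaussian radius such that a box shifted by that amount still keeps
// IoU >= min_overlap with the (height, width) box; the three quadratics cover the usual
// CornerNet-style overlap cases and the smallest of the three roots is taken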
float a1 = 1;
float b1 = (height + width);
float c1 = width * height * (1 - min_overlap) / (1 + min_overlap);
float sq1 = sqrt(b1 * b1 - 4 * a1 * c1);
float r1 = (b1 + sq1) / 2;
float a2 = 4;
float b2 = 2 * (height + width);
float c2 = (1 - min_overlap) * width * height;
float sq2 = sqrt(b2 * b2 - 4 * a2 * c2);
float r2 = (b2 + sq2) / 2;
float a3 = 4 * min_overlap;
float b3 = -2 * min_overlap * (height + width);
float c3 = (min_overlap - 1) * width * height;
float sq3 = sqrt(b3 * b3 - 4 * a3 * c3);
float r3 = (b3 + sq3) / 2;
return min(min(r1, r2), r3);
}
__global__ void draw_center_kernel(int batch_size, int max_boxes, int max_objs, int num_cls, int H, int W, int code_size, int min_radius,
float voxel_x, float voxel_y, float range_x, float range_y, float out_factor, float gaussian_overlap,
const float *gt_boxes, float *heatmap, int *gt_ind, int *gt_mask, int *gt_cat, float *gt_box_encoding, int *gt_cnt){
/*
Args:
gt_boxes: (B, max_boxes, 8 or 10) with class labels
heatmap: (B, num_cls, H, W)
gt_ind: (B, num_cls, max_objs)
gt_mask: (B, num_cls, max_objs)
gt_cat: (B, num_cls, max_objs)
gt_box_encoding: (B, num_cls, max_objs, code_size) sin/cos
gt_cnt: (B, num_cls)
*/
int bs_idx = blockIdx.x;
int box_idx = threadIdx.x;
if (bs_idx >= batch_size || box_idx >= max_boxes) return;
// move pointer
gt_boxes += bs_idx * max_boxes * code_size;
heatmap += bs_idx * num_cls * H * W;
gt_ind += bs_idx * num_cls * max_objs;
gt_mask += bs_idx * num_cls * max_objs;
gt_cat += bs_idx * num_cls * max_objs;
gt_box_encoding += bs_idx * num_cls * max_objs * code_size;
gt_cnt += bs_idx * num_cls;
// gt box parameters
float x = gt_boxes[box_idx * code_size + 0];
float y = gt_boxes[box_idx * code_size + 1];
float z = gt_boxes[box_idx * code_size + 2];
float dx = gt_boxes[box_idx * code_size + 3];
float dy = gt_boxes[box_idx * code_size + 4];
float dz = gt_boxes[box_idx * code_size + 5];
// origin dx/dy/dz is for box_encodings
float origin_dx = gt_boxes[box_idx * code_size + 3];
float origin_dy = gt_boxes[box_idx * code_size + 4];
float origin_dz = gt_boxes[box_idx * code_size + 5];
float rot = gt_boxes[box_idx * code_size + 6];
float vel_x = 0;
float vel_y = 0;
float cls = 0;
if (code_size == 10) {
vel_x = gt_boxes[box_idx * code_size + 7];
vel_y = gt_boxes[box_idx * code_size + 8];
cls = gt_boxes[box_idx * code_size + 9];
} else if (code_size == 8) {
cls = gt_boxes[box_idx * code_size + 7];
} else {
return;
}
// box not defined
if (dx == 0 || dy == 0 || dz == 0) return;
// cls begin from 1
int cls_idx = (int) cls - 1;
heatmap += cls_idx * H * W;
gt_ind += cls_idx * max_objs;
gt_mask += cls_idx * max_objs;
gt_cat += cls_idx * max_objs;
gt_box_encoding += cls_idx * max_objs * code_size;
gt_cnt += cls_idx;
// transform to bev map coords
float offset = 0.5;
float period = 6.283185307179586;
rot = limit_period(rot, offset, period);
dx = dx / voxel_x / out_factor;
dy = dy / voxel_y / out_factor;
float radius = gaussian_radius(dy, dx, gaussian_overlap);
int radius_int = max(min_radius, (int) radius);
float coor_x = (x - range_x) / voxel_x / out_factor;
float coor_y = (y - range_y) / voxel_y / out_factor;
int coor_x_int = (int) coor_x;
int coor_y_int = (int) coor_y;
if (coor_x_int >= W || coor_x_int < 0) return;
if (coor_y_int >= H || coor_y_int < 0) return;
// draw gaussian map
float div_factor = 6.0;
float sigma = (2 * radius_int + 1) / div_factor;
for (int scan_y = -radius_int; scan_y < radius_int + 1; scan_y++){
if (coor_y_int + scan_y < 0 || coor_y_int + scan_y >= H) continue;
for (int scan_x = -radius_int; scan_x < radius_int + 1; scan_x++){
if (coor_x_int + scan_x < 0 || coor_x_int + scan_x >= W) continue;
float weight = exp(-(scan_x * scan_x + scan_y * scan_y) / (2 * sigma * sigma)); // force convert float sigma
float eps = 0.0000001;
if (weight < eps) weight = 0;
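// the pair of atomicExch calls below keeps the larger of the stored and the new weight,
// i.e. a workaround for the missing float atomicMax; note it is not fully race-free when
// several boxes write the same pixel concurrently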
float *w_addr = heatmap + (coor_y_int + scan_y) * W + (coor_x_int + scan_x);
float old_weight = atomicExch(w_addr, weight);
if (old_weight > weight) weight = atomicExch(w_addr, old_weight);
}
}
int obj_idx = atomicAdd(gt_cnt, 1);
if (obj_idx >= max_objs) return;
gt_ind[obj_idx] = coor_y_int * W + coor_x_int;
gt_mask[obj_idx] = 1;
gt_cat[obj_idx] = cls_idx + 1; // begin from 1
gt_box_encoding[obj_idx * code_size + 0] = coor_x - coor_x_int;
gt_box_encoding[obj_idx * code_size + 1] = coor_y - coor_y_int;
gt_box_encoding[obj_idx * code_size + 2] = z;
gt_box_encoding[obj_idx * code_size + 3] = origin_dx;
gt_box_encoding[obj_idx * code_size + 4] = origin_dy;
gt_box_encoding[obj_idx * code_size + 5] = origin_dz;
gt_box_encoding[obj_idx * code_size + 6] = sin(rot);
gt_box_encoding[obj_idx * code_size + 7] = cos(rot);
if (code_size == 10) {
gt_box_encoding[obj_idx * code_size + 8] = vel_x;
gt_box_encoding[obj_idx * code_size + 9] = vel_y;
}
return;
}
void draw_center_kernel_launcher(int batch_size, int max_boxes, int max_objs, int num_cls, int H, int W, int code_size, int min_radius,
float voxel_x, float voxel_y, float range_x, float range_y, float out_factor, float gaussian_overlap,
const float *gt_boxes, float *heatmap, int *gt_ind, int *gt_mask, int *gt_cat, float *gt_box_encoding, int *gt_cnt){
cudaError_t err;
dim3 blocks(batch_size);
dim3 threads(THREADS_PER_BLOCK);
draw_center_kernel<<<blocks, threads>>>(batch_size, max_boxes, max_objs, num_cls, H, W, code_size, min_radius,
voxel_x, voxel_y, range_x, range_y, out_factor, gaussian_overlap,
gt_boxes, heatmap, gt_ind, gt_mask, gt_cat, gt_box_encoding, gt_cnt);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
#ifdef DEBUG
cudaDeviceSynchronize(); // for using printf in kernel function
#endif
}
|
4004d65e204db38b8d7a96fc6d3e079500751b6b.hip
|
// !!! This is a file automatically generated by hipify!!!
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_scan_cuda
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
typedef Tensor<float, 1>::DimensionPair DimPair;
template<int DataLayout>
void test_cuda_cumsum(int m_size, int k_size, int n_size)
{
std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
Tensor<float, 3, DataLayout> t_input(m_size, k_size, n_size);
Tensor<float, 3, DataLayout> t_result(m_size, k_size, n_size);
Tensor<float, 3, DataLayout> t_result_gpu(m_size, k_size, n_size);
t_input.setRandom();
std::size_t t_input_bytes = t_input.size() * sizeof(float);
std::size_t t_result_bytes = t_result.size() * sizeof(float);
float* d_t_input;
float* d_t_result;
hipMalloc((void**)(&d_t_input), t_input_bytes);
hipMalloc((void**)(&d_t_result), t_result_bytes);
hipMemcpy(d_t_input, t_input.data(), t_input_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
gpu_t_input(d_t_input, Eigen::array<int, 3>(m_size, k_size, n_size));
Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
gpu_t_result(d_t_result, Eigen::array<int, 3>(m_size, k_size, n_size));
gpu_t_result.device(gpu_device) = gpu_t_input.cumsum(1);
t_result = t_input.cumsum(1);
hipMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, hipMemcpyDeviceToHost);
for (size_t i = 0; i < t_result.size(); i++) {
if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) {
continue;
}
if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) {
continue;
}
std::cout << "mismatch detected at index " << i << ": " << t_result(i)
<< " vs " << t_result_gpu(i) << std::endl;
assert(false);
}
hipFree((void*)d_t_input);
hipFree((void*)d_t_result);
}
void test_cxx11_tensor_scan_cuda()
{
CALL_SUBTEST_1(test_cuda_cumsum<ColMajor>(128, 128, 128));
CALL_SUBTEST_2(test_cuda_cumsum<RowMajor>(128, 128, 128));
}
|
4004d65e204db38b8d7a96fc6d3e079500751b6b.cu
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_scan_cuda
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
typedef Tensor<float, 1>::DimensionPair DimPair;
template<int DataLayout>
void test_cuda_cumsum(int m_size, int k_size, int n_size)
{
std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
Tensor<float, 3, DataLayout> t_input(m_size, k_size, n_size);
Tensor<float, 3, DataLayout> t_result(m_size, k_size, n_size);
Tensor<float, 3, DataLayout> t_result_gpu(m_size, k_size, n_size);
t_input.setRandom();
std::size_t t_input_bytes = t_input.size() * sizeof(float);
std::size_t t_result_bytes = t_result.size() * sizeof(float);
float* d_t_input;
float* d_t_result;
cudaMalloc((void**)(&d_t_input), t_input_bytes);
cudaMalloc((void**)(&d_t_result), t_result_bytes);
cudaMemcpy(d_t_input, t_input.data(), t_input_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
gpu_t_input(d_t_input, Eigen::array<int, 3>(m_size, k_size, n_size));
Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
gpu_t_result(d_t_result, Eigen::array<int, 3>(m_size, k_size, n_size));
gpu_t_result.device(gpu_device) = gpu_t_input.cumsum(1);
t_result = t_input.cumsum(1);
cudaMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost);
for (size_t i = 0; i < t_result.size(); i++) {
if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) {
continue;
}
if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) {
continue;
}
std::cout << "mismatch detected at index " << i << ": " << t_result(i)
<< " vs " << t_result_gpu(i) << std::endl;
assert(false);
}
cudaFree((void*)d_t_input);
cudaFree((void*)d_t_result);
}
void test_cxx11_tensor_scan_cuda()
{
CALL_SUBTEST_1(test_cuda_cumsum<ColMajor>(128, 128, 128));
CALL_SUBTEST_2(test_cuda_cumsum<RowMajor>(128, 128, 128));
}
|
903384e78415b21cabc487a4a57f9bc4db6d7647.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#ifndef NOMATLAB
#include "mex.h"
#endif
// CUDA
#include "hip/hip_runtime.h"
#include "roctracer/roctx.h"
#include "cudaCommon.h"
#include "cudaSourceScalarPotential.h"
#include "cudaGradientKernels.h"
#include "cudaTestSourceComposite.h"
#define SRCBLOCKX 16
#define SRCBLOCKY 16
__global__ void cukern_FetchPartitionSubset1DExtrap(double *in, int nodeN, double *out, int partX0, int partNX);
template <geometryType_t coords>
__global__ void cukern_sourceComposite_IMP(double *fluidIn, double *Rvector, double *gravgrad, long pitch);
template <geometryType_t coords>
__global__ void cukern_sourceComposite_RK4(double *fluidIn, double *Rvector, double *gravgrad, long pitch);
// This will probably be slow as balls but should provide a golden standard of accuracy
template <geometryType_t coords>
__global__ void cukern_sourceComposite_GL4(double *fluidIn, double *Rvector, double *gravgrad, long pitch);
template <geometryType_t coords>
__global__ void cukern_sourceComposite_GL6(double *fluidIn, double *Rvector, double *gravgrad, long pitch);
__constant__ __device__ double devLambda[12];
#define LAMX devLambda[0]
#define LAMY devLambda[1]
#define LAMZ devLambda[2]
// Define: F = -beta * rho * grad(phi)
// rho_g = density for full effect of gravity
// rho_c = minimum density to feel gravity at all
// beta = { rho_g < rho : 1 (NORMAL GRAVITY) }
// { rho_c < rho < rho_g : [(rho-rho_c)/(rho_g-rho_c)]^2 }
// { rho < rho_c : 0 }
// This provides a continuous (though not differentiable at rho = rho_g) way to surpress gravitation of the background fluid
// The original process of cutting gravity off below a critical density a few times the minimum
// density is believed to cause "blowups" at the inner edge of circular flow profiles due to being
// discontinuous. If even smoothness is insufficient and smooth differentiability is required,
// a more-times-continuous profile can be constructed, but let's not go there unless forced.
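// (with G1 = 1/(rho_g - rho_c) and G2 = rho_c/(rho_g - rho_c) as defined below, the ramp in
// the middle band can be evaluated as beta = (rho*G1 - G2)^2 clamped to [0,1]; this is
// presumably how the kernels consume devLambda[5] and devLambda[6])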
// Density below which we force gravity effects to zero
#define RHO_FULLG devLambda[3]
#define RHO_NOG devLambda[4]
// 1 / (rho_fullg - rho_nog)
#define G1 devLambda[5]
// rho_nog / (rho_fullg - rho_nog)
#define G2 devLambda[6]
#define RINNER devLambda[7]
#define DELTAR devLambda[8]
// __constant__ parameters for the rotating frame terms
#define OMEGA devLambda[9]
#define DT devLambda[10]
#define TWO_OMEGA_T devLambda[11]
__constant__ __device__ int devIntParams[3];
#ifdef STANDALONE_MEX_FUNCTION
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if ((nrhs!=5) || (nlhs != 0)) mexErrMsgTxt("Wrong number of arguments: need cudaTestSourceComposite(FluidManager, phi, GeometryManager, [rhomin, rho_fullg, dt, spaceorder], [xvector yvector])\n");
CHECK_CUDA_ERROR("entering cudaSourceRotatingFrame");
// Get source array info and create destination arrays
MGArray fluid[5], gravPot, xyvec;
/* FIXME: accept this as a matlab array instead
* FIXME: Transfer appropriate segments to __constant__ memory
* FIXME: that seems the only reasonable way to avoid partitioning hell
*/
double *scalars = mxGetPr(prhs[3]);
if(mxGetNumberOfElements(prhs[3]) != 5) {
PRINT_FAULT_HEADER;
printf("The 4th argument must be a five element vector: [rho_nog, rho_fullg, dt, space order, temporal order]. It contains %lui elements.\n", mxGetNumberOfElements(prhs[3]));
PRINT_FAULT_FOOTER;
DROP_MEX_ERROR("Invalid arguments, brah!");
}
double rhonog= scalars[0];
double rhofg = scalars[1];
double dt = scalars[2];
int spaceOrder = (int)scalars[3];
int timeOrder = (int)scalars[4];
GeometryParams geom = accessMatlabGeometryClass(prhs[2]);
int status;
status = MGA_accessMatlabArrays(prhs, 4, 4, &xyvec);
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access X-Y vector."); }
if(spaceOrder != 0) {
status = MGA_accessMatlabArrays(prhs, 1, 1, &gravPot);
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access gravity potential array."); }
}
dim3 gridsize, blocksize;
int numFluids = mxGetNumberOfElements(prhs[0]);
int fluidct;
// Allocate one buffer to be used if we have multiple fluids
MGArray tempSlab;
tempSlab.nGPUs = -1; // nonallocated marker
for(fluidct = 0; fluidct < numFluids; fluidct++) {
status = MGA_accessFluidCanister(prhs[0], fluidct, &fluid[0]);
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break;
mxArray *q = derefXatNdotAdotB(prhs[0], fluidct, "MINMASS", NULL);
double rhomin = *mxGetPr(q);
double rhonog= rhomin * 4; // FIXME this is a test hack
double rhofg = rhomin * 4.1;
status = sourcefunction_Composite(&fluid[0], &gravPot, &xyvec, geom, rhonog, rhofg, dt, spaceOrder, timeOrder, &tempSlab);
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to apply rotating frame source terms."); }
}
MGA_delete(&tempSlab);
}
#endif
int sourcefunction_Composite(MGArray *fluid, MGArray *phi, MGArray *XYVectors, GeometryParams geom, double rhoNoG, double rhoFullGravity, double dt, int spaceOrder, int timeOrder, MGArray *storageBuffer)
{
#ifdef USE_NVTX
roctxRangePush(__FUNCTION__);
#endif
dim3 gridsize, blocksize;
int3 arraysize;
double lambda[11];
int i;
int worked;
double *devXYset[fluid->nGPUs];
int sub[6];
double *dx = &geom.h[0];
lambda[3] = rhoFullGravity;
lambda[4] = rhoNoG;
lambda[5] = 1.0/(rhoFullGravity - rhoNoG);
lambda[6] = rhoNoG/(rhoFullGravity - rhoNoG);
lambda[7] = geom.Rinner; // This is actually overwritten per partition below
lambda[8] = dx[1];
lambda[9] = geom.frameOmega;
lambda[10]= dt;
//int isThreeD = (fluid->dim[2] > 1);
int isRZ = (fluid->dim[2] > 1) & (fluid->dim[1] == 1);
MGArray gradslab;
gradslab.nGPUs = -1;
int usingLocalStorage = 0;
// if we get no buffer then allocate local storage
if(storageBuffer == NULL) {
usingLocalStorage = 1;
storageBuffer = &gradslab;
}
if(storageBuffer->nGPUs == -1) { // need to allocate it
#ifdef USE_NVTX
roctxMark("cudaTestSourceComposite.cu:182 large malloc 3 slabs");
#endif
worked = MGA_allocSlab(phi, storageBuffer, 3);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
}
MGArray *gs = storageBuffer;
worked = computeCentralGradient(phi, gs, geom, spaceOrder, dt);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
for(i = 0; i < fluid->nGPUs; i++) {
hipSetDevice(fluid->deviceID[i]);
calcPartitionExtent(fluid, i, &sub[0]);
lambda[7] = geom.Rinner + dx[0] * sub[0]; // Innermost cell coord may change per-partition
hipMemcpyToSymbol((const void *)devLambda, lambda, 11*sizeof(double), 0, hipMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("hipMemcpyToSymbol");
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break;
hipMemcpyToSymbol((const void *)devIntParams, &sub[3], 3*sizeof(int), 0, hipMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("memcpy to symbol");
if(worked != SUCCESSFUL) break;
hipMalloc((void **)&devXYset[i], (sub[3]+sub[4])*sizeof(double));
worked = CHECK_CUDA_ERROR("malloc devXYset");
if(worked != SUCCESSFUL) break;
}
if(worked != SUCCESSFUL) return worked;
double *fpi;
// Iterate over all partitions, and here we GO!
for(i = 0; i < fluid->nGPUs; i++) {
hipSetDevice(fluid->deviceID[i]);
worked = CHECK_CUDA_ERROR("hipSetDevice");
if(worked != SUCCESSFUL) break;
calcPartitionExtent(fluid, i, sub);
arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5];
fpi = fluid->devicePtr[i]; // save some readability below...
// This section extracts the portions of the supplied partition-cloned [X;Y] vector relevant to the current partition
blocksize = makeDim3(128, 1, 1);
gridsize.x = ROUNDUPTO(sub[3], 128) / 128;
gridsize.y = gridsize.z = 1;
hipLaunchKernelGGL(( cukern_FetchPartitionSubset1DExtrap), dim3(gridsize), dim3(blocksize), 0, 0, XYVectors->devicePtr[i], fluid->dim[0], devXYset[i], sub[0], sub[3]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, XYVectors, i, "cukern_FetchPartitionSubset1DExtrap, X");
if(worked != SUCCESSFUL) break;
gridsize.x = ROUNDUPTO(sub[4], 128) / 128;
hipLaunchKernelGGL(( cukern_FetchPartitionSubset1DExtrap), dim3(gridsize), dim3(blocksize), 0, 0, XYVectors->devicePtr[i] + fluid->dim[0], fluid->dim[1], devXYset[i]+sub[3], sub[1], sub[4]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, XYVectors, i, "cukern_FetchPartitionSubset1DExtrap, Y");
if(worked != SUCCESSFUL) break;
// Prepare to launch the solver itself!
arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5];
blocksize = makeDim3(SRCBLOCKX, SRCBLOCKY, 1);
gridsize.x = ROUNDUPTO(arraysize.x, blocksize.x) / blocksize.x;
gridsize.y = (isRZ) ? 1 : arraysize.z;
gridsize.z = 1;
switch(timeOrder) {
case 2:
if(isRZ) {
if(geom.shape == SQUARE) {
hipLaunchKernelGGL(( cukern_sourceComposite_IMP<RZSQUARE>), dim3(gridsize), dim3(blocksize), 0, 0, fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
hipLaunchKernelGGL(( cukern_sourceComposite_IMP<RZCYLINDRICAL>), dim3(gridsize), dim3(blocksize), 0, 0, fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
} else {
if(geom.shape == SQUARE) {
hipLaunchKernelGGL(( cukern_sourceComposite_IMP<SQUARE>), dim3(gridsize), dim3(blocksize), 0, 0, fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
hipLaunchKernelGGL(( cukern_sourceComposite_IMP<CYLINDRICAL>), dim3(gridsize), dim3(blocksize), 0, 0, fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
}
break;
case 4:
if(isRZ) {
if(geom.shape == SQUARE) {
worked = ERROR_NOIMPLEMENT; break;
//cukern_sourceComposite_GL4<RZSQUARE><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
hipLaunchKernelGGL(( cukern_sourceComposite_GL4<RZCYLINDRICAL>), dim3(gridsize), dim3(blocksize), 0, 0, fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
} else {
if(geom.shape == SQUARE) {
worked = ERROR_NOIMPLEMENT; break;
//cukern_sourceComposite_GL4<SQUARE><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
hipLaunchKernelGGL(( cukern_sourceComposite_GL4<CYLINDRICAL>), dim3(gridsize), dim3(blocksize), 0, 0, fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
}
break;
case 6:
if(isRZ) {
if(geom.shape == SQUARE) {
worked = ERROR_NOIMPLEMENT; break;
//cukern_sourceComposite_GL6<RZSQUARE><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
hipLaunchKernelGGL(( cukern_sourceComposite_GL6<RZCYLINDRICAL>), dim3(gridsize), dim3(blocksize), 0, 0, fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
} else {
if(geom.shape == SQUARE) {
worked = ERROR_NOIMPLEMENT; break;
//cukern_sourceComposite_GL6<SQUARE><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
hipLaunchKernelGGL(( cukern_sourceComposite_GL6<CYLINDRICAL>), dim3(gridsize), dim3(blocksize), 0, 0, fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
}
break;
default:
PRINT_FAULT_HEADER;
printf("Source function requires a temporal order of 2 (implicit midpt), 4 (Gauss-Legendre 4th order) or 6 (GL-6th): Received %i\n", timeOrder);
PRINT_FAULT_FOOTER;
break;
}
if(worked != SUCCESSFUL) break;
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "cukernSourceComposite");
if(worked != SUCCESSFUL) break;
}
if(worked != SUCCESSFUL) return worked;
worked = MGA_exchangeLocalHalos(fluid, 5);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
int j; // This will halt at the stage failed upon if CUDA barfed above
#ifdef USE_NVTX
roctxMark("Freeing devXYset");
#endif
for(j = 0; j < i; j++) {
hipFree((void *)devXYset[j]);
}
if(usingLocalStorage) {
#ifdef USE_NVTX
roctxMark("cudaTestSourceComposite.cu:323 large free");
#endif
MGA_delete(gs);
}
// Don't bother checking hipFree if we already have an error caused above, it was just trying to clean up the barf
if(worked == SUCCESSFUL)
worked = CHECK_CUDA_ERROR("hipFree");
#ifdef USE_NVTX
roctxRangePop();
#endif
return CHECK_IMOGEN_ERROR(worked);
}
/* The equations of motion for a rotating frame:
*
* a = -[2 w X v + w X (w X r) ]
* dv = -[2 w X v + w X (w X r) ] dt
* dp = -rho dv = -rho [[2 w X v + w X (w X r) ] dt
* dp = -[2 w X p + rho w X (w X r) ] dt
*
* w X p = |I J K | = <-w py, w px, 0> = u
* |0 0 w |
* |px py pz|
*
* w X r = <-w y, w x, 0> = s;
* w X s = |I J K| = <-w^2 x, -w^2 y, 0> = -w^2<x,y,0> = b
* |0 0 w|
* |-wy wx 0|
* dp = -[2 u + rho b] dt
* = -[2 w<-py, px, 0> - rho w^2 <x, y, 0>] dt
* = w dt [2<py, -px> + rho w <x, y>] in going to static frame
*
* dE = -v dot dp
*/
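/* Worked example of the algebra above (a hypothetical helper; the kernels below do
 * their own in-place momentum updates and never call this): for w = w*zhat the
 * acceleration a = -[2 w X v + w X (w X r)] reduces to two Cartesian components. */
__device__ __forceinline__ double3 rotatingFrameAccelSketch(double w, double3 v, double3 r)
{
	double3 a;
	a.x =  2.0*w*v.y + w*w*r.x; // -2(w X v).x - (w X (w X r)).x
	a.y = -2.0*w*v.x + w*w*r.y; // -2(w X v).y - (w X (w X r)).y
	a.z = 0.0;                  // rotation is about z-hat: no z component
	return a;
}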
/* rho, E, Px, Py, Pz: arraysize-sized arrays
omega: scalar
Rx: [nx 1 1] sized array
Ry: [ny 1 1] sized array */
#define JACOBI_ITER_MAX 3
#define NTH (SRCBLOCKX*SRCBLOCKY)
/* Solves the combined equations of a rotating frame and gravity,
*
* d/dt[ px ] = - rho (2 w X v + w X (w X r)).xhat - rho dphi/dx
* [ py ] = - rho (2 w X v + w X (w X r)).yhat - rho dphi/dy
* [ pz ] = - rho (2 w X v + w X (w X r)).zhat - rho dphi/dz
* [ E ] = p.dp/2
*
* in either SQUARE or CYLINDRICAL coordinates using the implicit midpoint method,
*
* y_half = y_0 + .5 dt f(y_half);
* y_1 = y_0 + dt f(y_half);
*
* The implicit equations are iterated using JACOBI_ITER_MAX Jacobi steps updating vx then vy.
* Frame rotation is always in the z-hat direction so no nonlinearity appears in the z direction.
*/
template <geometryType_t coords>
__global__ void cukern_sourceComposite_IMP(double *fluidIn, double *Rvector, double *gravgrad, long pitch)
{
__shared__ double shar[4*SRCBLOCKX*SRCBLOCKY];
//__shared__ double px0[SRCBLOCKX*SRCBLOCKY], py0[SRCBLOCKX*SRCBLOCKY];
/* strategy: XY files, fill in X direction, step in Y direction; griddim.y = Nz */
int myx = threadIdx.x + SRCBLOCKX*blockIdx.x;
int myy = threadIdx.y;
int myz = blockIdx.y;
int nx = devIntParams[0];
int ny;
if((coords == SQUARE) || (coords == CYLINDRICAL)) { // Not RZ coords
ny = devIntParams[1];
} else {
ny = devIntParams[2];
}
if(myx >= devIntParams[0]) return; // return if x >= nx
// Compute global index at the start
int tileaddr = myx + nx*(myy + ny*myz);
fluidIn += tileaddr;
gravgrad += tileaddr;
tileaddr = threadIdx.x + SRCBLOCKX*threadIdx.y;
double locX = Rvector[myx];
Rvector += nx; // Advances this to the Y array for below
double locY;
if((coords == CYLINDRICAL) || (coords == RZCYLINDRICAL)) locY = 0.0;
double locRho, deltaphi;
double vdel, dener;
double vx0, vy0, vz0, vphi_combined;
int jacobiIters;
for(; myy < ny; myy += SRCBLOCKY) {
// Only in square XY or XYZ coordinates must we account for a centripetal term in the 2-direction
if((coords == SQUARE) || (coords == RZSQUARE)) {
locY = Rvector[myy];
}
locRho = *fluidIn;
vx0 = fluidIn[2*pitch] / locRho; // convert to vr
vy0 = fluidIn[3*pitch] / locRho; // convert to vy/vphi
vz0 = fluidIn[4*pitch] / locRho;
shar[tileaddr] = vx0;
shar[tileaddr+NTH] = vy0;
// Repeatedly perform fixed point iterations to solve the combined time differential operators
// This yields the implicit Euler value for the midpoint (t = 0.5) if successful
for(jacobiIters = 0; jacobiIters < JACOBI_ITER_MAX; jacobiIters++) {
if((coords == SQUARE) || (coords == RZSQUARE)) {
// Rotating frame contribution, vx
vdel = DT*OMEGA*(OMEGA*locX + 2.0*shar[tileaddr+NTH]); // delta-vx
// Gravity gradient contribution, vx
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store predicted value for vx
shar[tileaddr+2*NTH] = vx0 + .5*vdel;
// rotating frame contribution, vy
vdel = -DT*OMEGA*(OMEGA*locY - 2*shar[tileaddr]);
// gravity gradient contribution, vy
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store predicted delta for vy
shar[tileaddr+3*NTH] = vy0 + .5*vdel;
} else {
// Rotating frame contribution + cylindrical contribution, pr
vphi_combined = OMEGA*locX + shar[tileaddr+NTH];
vdel = DT*vphi_combined*vphi_combined / locX; // a = (vphi + r*W)^2 / r
// Gravity gradient contribution, pr
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store predicted value for pr
shar[tileaddr+2*NTH] = vx0 + .5*vdel;
// rotating frame contribution, ptheta
vphi_combined = shar[tileaddr+NTH] + 2*locX*OMEGA; // a = -vr vphi - 2 vr w
vdel = -DT*shar[tileaddr]*vphi_combined / locX;
// gravity gradient contribution, ptheta
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store predicted delta for ptheta
shar[tileaddr+3*NTH] = vy0 + .5*vdel;
}
__syncthreads();
shar[tileaddr] = shar[tileaddr+2*NTH];
shar[tileaddr+NTH] = shar[tileaddr+3*NTH];
__syncthreads();
}
// Compute minus the original XY/R-theta kinetic energy density
dener = -(vx0*vx0+vy0*vy0+vz0*vz0);
if((coords == SQUARE) || (coords == RZSQUARE)) {
// Rotating frame contribution, vx
vdel = DT*OMEGA*(OMEGA*locX + 2.0*shar[tileaddr+2*NTH]); // delta-vx
// Gravity gradient contribution, vx
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store value for vx
vx0 += vdel;
// rotating frame contribution, vy
vdel = -DT*OMEGA*(OMEGA*locY - 2*shar[tileaddr+NTH]);
// gravity gradient contribution, vy
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store delta for vy
vy0 += vdel;
} else {
// Rotating frame contribution + cylindrical contribution, pr
vphi_combined = OMEGA*locX + shar[tileaddr+NTH];
vdel = DT*vphi_combined*vphi_combined/locX;
// Gravity gradient contribution, pr
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store predicted value for pr
vx0 += vdel;
// rotating frame contribution, ptheta
vphi_combined = shar[tileaddr+NTH] + 2*locX*OMEGA;
vdel = -DT*shar[tileaddr]*vphi_combined/locX;
// gravity gradient contribution, ptheta
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store predicted delta for ptheta
vy0 += vdel;
}
// Only a linear force in the Z direction: No need to iterate: Exact solution available
deltaphi = gravgrad[2*pitch];
vz0 -= deltaphi;
// Add the new XY/R-theta kinetic energy density
dener += vx0*vx0+vy0*vy0+vz0*vz0;
fluidIn[2*pitch] = vx0 * locRho;
fluidIn[3*pitch] = vy0 * locRho;
fluidIn[4*pitch] = vz0 * locRho;
// Change in total energy is exactly the work done by forces
fluidIn[pitch] += .5*locRho*dener;
// Hop pointers forward
fluidIn += nx*SRCBLOCKY;
gravgrad+= nx*SRCBLOCKY;
}
}
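/* Host-side sketch of the time discretization used by cukern_sourceComposite_IMP,
 * shown for a scalar ODE dy/dt = f(y). This is a hypothetical illustration only and
 * is not part of any call path: the same fixed-point strategy, with JACOBI_ITER_MAX
 * sweeps refining the midpoint value before the full step is taken. */
static inline double implicitMidpointSketch(double (*f)(double), double y0, double dt)
{
	double yHalf = y0; // initial guess for y_half
	int it;
	for(it = 0; it < JACOBI_ITER_MAX; it++) {
		yHalf = y0 + .5*dt*f(yHalf); // y_half = y_0 + .5 dt f(y_half)
	}
	return y0 + dt*f(yHalf);         // y_1 = y_0 + dt f(y_half)
}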
/* These coefficients define the Butcher tableau of the
* 4th order Gauss-Legendre quadrature method */
#define GL4_C1 0.2113248654051871344705659794271924
#define GL4_C2 0.7886751345948128655294340205728076
#define GL4_A11 .25
#define GL4_A12 -0.03867513459481286552943402057280764
#define GL4_A21 0.5386751345948128655294340205728076
#define GL4_A22 .25
/* Solves the combined equations of a rotating frame and gravity in either
* SQUARE or CYLINDRICAL coordinates using 4th order Gauss-Legendre quadrature.
* This requires simultaneous solution of 2N equations at 2 intermediate points,
* for N=2 (vx and vy) followed by evaluation of the output sum.
*
* The implicit solve makes a forward Euler starter prediction before
* applying Jacobi iterations to update in the order
* vx1, vy1, vx2, vy2
 * for up to JACOBI_ITER_MAX times.
*/
template <geometryType_t coords>
__global__ void cukern_sourceComposite_GL4(double *fluidIn, double *Rvector, double *gravgrad, long pitch)
{
__shared__ double shar[4*SRCBLOCKX*SRCBLOCKY];
/* strategy: XY files, fill in X direction, step in Y direction; griddim.y = Nz */
int myx = threadIdx.x + SRCBLOCKX*blockIdx.x;
int myy = threadIdx.y;
int myz = blockIdx.y;
int nx = devIntParams[0];
int ny;
if((coords == SQUARE) || (coords == CYLINDRICAL)) { // Not RZ coords
ny = devIntParams[1];
} else {
ny = devIntParams[2];
}
if(myx >= devIntParams[0]) return; // return if x >= nx
// Compute global index at the start
int tileaddr = myx + nx*(myy + ny*myz);
fluidIn += tileaddr;
gravgrad += tileaddr;
tileaddr = threadIdx.x + SRCBLOCKX*threadIdx.y;
double locX = Rvector[myx];
Rvector += nx; // Advances this to the Y array for below
double locY;
if((coords == CYLINDRICAL) || (coords == RZCYLINDRICAL)) locY = 0.0;
double locRho, deltaphi;
double vdel, dener;
double vxA, vxB, vyA, vyB;
double q1, q2; // temp vars?
int jacobiIters;
for(; myy < ny; myy += SRCBLOCKY) {
// Only in square XY or XYZ coordinates must we account for a centripetal term in the 2-direction
if((coords == SQUARE) || (coords == RZSQUARE)) {
locY = Rvector[myy];
}
locRho = *fluidIn;
vxA = fluidIn[2*pitch] / locRho; // convert to vr
vyA = fluidIn[3*pitch] / locRho; // convert to vy/vphi
shar[tileaddr] = vxA;
shar[tileaddr+NTH] = vyA;
// Generate a 1st order prediction for what the values will be using fwd euler
// This is worth roughly 1 iteration but as can be seen will take way less time
if((coords == SQUARE) || (coords == RZSQUARE)) {
q1 = DT*OMEGA*(OMEGA*locX + 2.0*vyA) - gravgrad[0]; // delta-vx
q2 = -DT*OMEGA*(OMEGA*locY - 2*vxA) - gravgrad[pitch]; // delta-vy
vxB = vxA + GL4_C2 * q1;
vyB = vyA + GL4_C2 * q2;
vxA += GL4_C1 * q1;
vyA += GL4_C1 * q2;
} else {
q1 = OMEGA*locX + vyA;
q2 = -vxA*(vyA + 2*OMEGA*locX);
deltaphi = gravgrad[0];
vxB = vxA + GL4_C2*(DT*q1*q1/locX - deltaphi);
vxA += GL4_C1*(DT*q1*q1/locX - deltaphi);
deltaphi = gravgrad[pitch];
vyB = vyA + GL4_C2*(DT*q2/locX - deltaphi);
vyA += GL4_C1*(DT*q2/locX - deltaphi);
}
// Repeatedly perform fixed point iterations to solve the combined differential operators
for(jacobiIters = 0; jacobiIters < JACOBI_ITER_MAX; jacobiIters++) {
if((coords == SQUARE) || (coords == RZSQUARE)) {
/////////////////
/////////////// ruh-roh: SQUARE / RZSQUARE stage update not implemented; the host
/////////////// switch returns ERROR_NOIMPLEMENT before launching this instantiation
/////////////////
} else {
// Rotating frame contribution + cylindrical contribution, vr, step A
q1 = OMEGA*locX + vyA;
q2 = OMEGA*locX + vyB;
// Gravity gradient contribution, vr, step A
deltaphi = gravgrad[0];
// Improve estimates for radial velocity
vdel = -GL4_C1*deltaphi + DT*(q1*q1*GL4_A11 + q2*q2*GL4_A12)/locX;
vxA = shar[tileaddr] + vdel;
vdel = -GL4_C2*deltaphi + DT*(q1*q1*GL4_A21 + q2*q2*GL4_A22)/locX;
vxB = shar[tileaddr] + vdel;
// Load azimuthal gravity gradient
deltaphi = gravgrad[pitch];
q2 = vxB*(vyB+2*locX*OMEGA);
// Note we leave the GL quadrature coefficient off and can reuse q2
//vdel = -DT*(GL4_A11*2*locX*OMEGA*vxA+GL4_A12*q2)/locX - GL4_C1 * deltaphi;
//vyA = (shar[tileaddr + NTH] + vdel)/(1+DT*GL4_A11*vxA/locX)
vdel = -DT*(GL4_A11*2*locX*OMEGA*vxA+GL4_A12*q2) - GL4_C1 * deltaphi * locX;
vyA = (shar[tileaddr + NTH]*locX + vdel)/(locX+DT*GL4_A11*vxA);
q1 = vxA*(vyA+2*locX*OMEGA);
vdel = -DT*(GL4_A21*q1+GL4_A22*2*locX*OMEGA*vxB) - GL4_C2 * deltaphi * locX;
vyB = (shar[tileaddr+NTH]*locX + vdel)/(locX + DT*GL4_A22*vxB);
}
}
// Compute minus the original kinetic energy density
q1 = shar[tileaddr];
q2 = shar[tileaddr+NTH];
dener = -(q1*q1+q2*q2);
q1 = fluidIn[4*pitch] / locRho;
dener -= q1*q1;
if((coords == SQUARE) || (coords == RZSQUARE)) {
///////////////
//////////// ruh-roh: SQUARE / RZSQUARE final update not implemented; the host
//////////// switch returns ERROR_NOIMPLEMENT before launching this instantiation
/////////////
} else {
// evaluate final Vr
q1 = OMEGA*locX + vyA;
q2 = OMEGA*locX + vyB;
deltaphi = gravgrad[0];
shar[tileaddr] = shar[tileaddr] - deltaphi + .5*DT*(q1*q1+q2*q2)/locX;
// evaluate final Vphi
deltaphi = gravgrad[pitch];
shar[tileaddr+NTH] = shar[tileaddr+NTH] - deltaphi - .5*DT*(vxA*(vyA+2*OMEGA*locX)+vxB*(vyB+2*OMEGA*locX))/locX;
}
vxA = shar[tileaddr];
vyA = shar[tileaddr+NTH];
// Only a linear force in the Z direction: No need to iterate: Exact solution available
deltaphi = gravgrad[2*pitch];
q1 = fluidIn[4*pitch] / locRho - deltaphi;
// Add the new XY/R-theta kinetic energy density
dener += (vxA*vxA + vyA*vyA + q1*q1);
fluidIn[2*pitch] = vxA * locRho;
fluidIn[3*pitch] = vyA * locRho;
fluidIn[4*pitch] = q1 * locRho;
// Change in total energy is exactly the work done by forces
fluidIn[pitch] += .5*locRho*dener;
// Hop pointers forward
fluidIn += nx*SRCBLOCKY;
gravgrad+= nx*SRCBLOCKY;
}
}
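/* Host-side sketch of the GL4 step for a scalar ODE dy/dt = f(y), using the same
 * Butcher coefficients and a Jacobi-style fixed-point stage solve in the spirit of
 * the kernel above. Hypothetical illustration only; the GL4 b-weights are (1/2, 1/2). */
static inline double gl4StepSketch(double (*f)(double), double y0, double dt)
{
	// Forward-Euler starter predictions at the two stage times c1*dt and c2*dt
	double yA = y0 + GL4_C1*dt*f(y0);
	double yB = y0 + GL4_C2*dt*f(y0);
	int it;
	for(it = 0; it < JACOBI_ITER_MAX; it++) {
		double kA = f(yA), kB = f(yB);
		yA = y0 + dt*(GL4_A11*kA + GL4_A12*kB);
		yB = y0 + dt*(GL4_A21*kA + GL4_A22*kB);
	}
	return y0 + .5*dt*(f(yA) + f(yB)); // b1 = b2 = 1/2
}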
#define GL6_C1 0.28918148932210804453
#define GL6_C2 .5
#define GL6_C3 0.71081851067789195547
#define GL6_A11 0.13888888888888888889
#define GL6_A21 0.30026319498086459244
#define GL6_A31 0.26798833376246945173
#define GL6_A12 -0.035976667524938903456
#define GL6_A22 0.22222222222222222222
#define GL6_A32 0.4804211119693833479
#define GL6_A13 0.0097894440153083260496
#define GL6_A23 -0.02248541720308681466
#define GL6_A33 0.13888888888888888889
#define GL6_B1 0.27777777777777777778
#define GL6_B2 0.44444444444444444444
#define GL6_B3 0.27777777777777777778
/* Solves the combined equations of a rotating frame and gravity
* in either SQUARE or CYLINDRICAL coordinates using 6th order
* Gauss-Legendre quadrature: This requires simultaneous self-consistent
* solution of 3N equations at 3 intermediate points, for N=2 (vx and vy)
* followed by evaluation of the output sum.
*
* The implicit solve makes a forward Euler starter prediction before
* applying Jacobi iterations to update in the order
* vx1, vx2, vx3, vy1, vy2, vy3
 * for up to JACOBI_ITER_MAX times.
*/
template <geometryType_t coords>
__global__ void cukern_sourceComposite_GL6(double *fluidIn, double *Rvector, double *gravgrad, long pitch)
{
__shared__ double shar[6*SRCBLOCKX*SRCBLOCKY];
//__shared__ double px0[SRCBLOCKX*SRCBLOCKY], py0[SRCBLOCKX*SRCBLOCKY];
// strategy: XY files, fill in X direction, step in Y direction; griddim.y = Nz
int myx = threadIdx.x + SRCBLOCKX*blockIdx.x;
int myy = threadIdx.y;
int myz = blockIdx.y;
int nx = devIntParams[0];
int ny;
if((coords == SQUARE) || (coords == CYLINDRICAL)) { // Not RZ coords
ny = devIntParams[1];
} else {
ny = devIntParams[2];
}
if(myx >= devIntParams[0]) return; // return if x >= nx
// Compute global index at the start
int tileaddr = myx + nx*(myy + ny*myz);
fluidIn += tileaddr;
gravgrad += tileaddr;
tileaddr = threadIdx.x + SRCBLOCKX*threadIdx.y;
double locX = Rvector[myx];
Rvector += nx; // Advances this to the Y array for below
double locY;
if((coords == CYLINDRICAL) || (coords == RZCYLINDRICAL)) locY = 0.0;
double locRho, deltaphi;
double vdel, dener;
double vxA, vxB, vxC, vyA, vyB, vyC;
double q1, q2, q3; // temp vars?
int jacobiIters;
for(; myy < ny; myy += SRCBLOCKY) {
// Only in square XY or XYZ coordinates must we account for a centripetal term in the 2-direction
if((coords == SQUARE) || (coords == RZSQUARE)) {
locY = Rvector[myy];
}
locRho = *fluidIn;
vxA = fluidIn[2*pitch] / locRho; // convert to vr
vyA = fluidIn[3*pitch] / locRho; // convert to vy/vphi
shar[tileaddr] = vxA;
shar[tileaddr+NTH] = vyA;
// Generate a 1st order prediction for what the values will be using fwd euler
// This is worth roughly 1 iteration but as can be seen will take way less time
if((coords == SQUARE) || (coords == RZSQUARE)) {
///// SQUARE / RZSQUARE predictor not implemented; the host switch returns
///// ERROR_NOIMPLEMENT before launching this instantiation
/////
} else {
q1 = OMEGA*locX + vyA;
q2 = -vxA*(vyA + 2*OMEGA*locX);
deltaphi = gravgrad[0];
vxC = vxA + GL6_C3*(DT*q1*q1/locX - deltaphi);
vxB = vxA + GL6_C2*(DT*q1*q1/locX - deltaphi);
vxA += GL6_C1*(DT*q1*q1/locX - deltaphi);
deltaphi = gravgrad[pitch];
vyC = vyA + GL6_C3*(DT*q2/locX - deltaphi);
vyB = vyA + GL6_C2*(DT*q2/locX - deltaphi);
vyA += GL6_C1*(DT*q2/locX - deltaphi);
}
// Repeatedly perform fixed point iterations to solve the combined time differential operators
// This yields the implicit Euler value for the midpoint (t = 0.5) if successful
for(jacobiIters = 0; jacobiIters < JACOBI_ITER_MAX; jacobiIters++) {
if((coords == SQUARE) || (coords == RZSQUARE)) {
///////////////
/////////////// ruh-roh: SQUARE / RZSQUARE stage update not implemented; the host
/////////////// switch returns ERROR_NOIMPLEMENT before launching this instantiation
///////////////
} else {
// Rotating frame contribution + cylindrical contribution, Vr:
// Depends only on Vtheta... improve all estimates for Vr now:
q1 = OMEGA*locX + vyA;
q2 = OMEGA*locX + vyB;
q3 = OMEGA*locX + vyC;
// Gravity gradient contribution, vr
deltaphi = gravgrad[0];
vdel = -GL6_C1*deltaphi + DT*(q1*q1*GL6_A11 + q2*q2*GL6_A12+q3*q3*GL6_A13)/locX;
vxA = shar[tileaddr] + vdel;
vdel = -GL6_C2*deltaphi + DT*(q1*q1*GL6_A21 + q2*q2*GL6_A22+q3*q3*GL6_A23)/locX;
vxB = shar[tileaddr] + vdel;
vdel = -GL6_C3*deltaphi + DT*(q1*q1*GL6_A31 + q2*q2*GL6_A32+q3*q3*GL6_A33)/locX;
vxC = shar[tileaddr] + vdel;
// gravity gradient contribution, vtheta
deltaphi = gravgrad[pitch];
// rotating frame contribution, vtheta
q1 = vxA*(vyA+2*locX*OMEGA);
q2 = vxB*(vyB+2*locX*OMEGA);
q3 = vxC*(vyC+2*locX*OMEGA);
vdel = -DT*(GL6_A11*q1 + GL6_A12*q2+GL6_A13*q3)/locX - GL6_C1 * deltaphi;
vyA = shar[tileaddr+NTH] + vdel;
// update q1 & improve vyB
q1 = vxA*(vyA+2*locX*OMEGA);
vdel = -DT*(GL6_A21*q1 + GL6_A22*q2+GL6_A23*q3)/locX - GL6_C2 * deltaphi;
vyB = shar[tileaddr+NTH] + vdel;
// update q2 & improve vyC
q2 = vxB*(vyB+2*locX*OMEGA);
vdel = -DT*(GL6_A31*q1 + GL6_A32*q2+GL6_A33*q3)/locX - GL6_C3 * deltaphi;
vyC = shar[tileaddr+NTH] + vdel;
}
}
// Compute minus the original kinetic energy density
q1 = shar[tileaddr];
q2 = shar[tileaddr+NTH];
dener = -(q1*q1+q2*q2);
q1 = fluidIn[4*pitch] / locRho;
dener -= q1*q1;
if((coords == SQUARE) || (coords == RZSQUARE)) {
///////////////
//////////// ruh-roh: SQUARE / RZSQUARE final update not implemented; the host
//////////// switch returns ERROR_NOIMPLEMENT before launching this instantiation
/////////////
} else {
// evaluate final Vr
q1 = OMEGA*locX + vyA;
q2 = OMEGA*locX + vyB;
q3 = OMEGA*locX + vyC;
deltaphi = gravgrad[0];
shar[tileaddr] = shar[tileaddr] - deltaphi + DT*(GL6_B1*q1*q1 + GL6_B2*q2*q2 + GL6_B3*q3*q3)/locX;
// evaluate final Vphi
q1 = vxA*(vyA+2*OMEGA*locX);
q2 = vxB*(vyB+2*OMEGA*locX);
q3 = vxC*(vyC+2*OMEGA*locX);
deltaphi = gravgrad[pitch];
shar[tileaddr+NTH] = shar[tileaddr+NTH] - deltaphi - DT*(GL6_B1*q1 + GL6_B2*q2 + GL6_B3*q3)/locX;
}
vxA = shar[tileaddr];
vyA = shar[tileaddr+NTH];
// Only a linear force in the Z direction: No need to iterate: Exact solution available
deltaphi = gravgrad[2*pitch];
q1 = fluidIn[4*pitch] / locRho - deltaphi;
// Add the new XY/R-theta kinetic energy density
dener += (vxA*vxA + vyA*vyA + q1*q1);
fluidIn[2*pitch] = vxA * locRho;
fluidIn[3*pitch] = vyA * locRho;
fluidIn[4*pitch] = q1 * locRho;
// Change in total energy is exactly the work done by forces
fluidIn[pitch] += .5*locRho*dener;
// Hop pointers forward
fluidIn += nx*SRCBLOCKY;
gravgrad+= nx*SRCBLOCKY;
}
}
/* Solves the combined equations of a rotating frame and gravity
* in either SQUARE or CYLINDRICAL coordinates using the well-known
* 4th order explicit multistage method of Runge & Kutta.
*/
template <geometryType_t coords>
__global__ void cukern_sourceComposite_RK4(double *fluidIn, double *Rvector, double *gravgrad, long pitch)
{
__shared__ double shar[4*SRCBLOCKX*SRCBLOCKY];
//__shared__ double px0[SRCBLOCKX*SRCBLOCKY], py0[SRCBLOCKX*SRCBLOCKY];
/* strategy: XY files, fill in X direction, step in Y direction; griddim.y = Nz */
int myx = threadIdx.x + SRCBLOCKX*blockIdx.x;
int myy = threadIdx.y;
int myz = blockIdx.y;
int nx = devIntParams[0];
int ny;
if((coords == SQUARE) || (coords == CYLINDRICAL)) { // Not RZ coords
ny = devIntParams[1];
} else {
ny = devIntParams[2];
}
if(myx >= devIntParams[0]) return; // return if x >= nx
// Compute global index at the start
int tileaddr = myx + nx*(myy + ny*myz);
fluidIn += tileaddr;
gravgrad += tileaddr;
tileaddr = threadIdx.x + SRCBLOCKX*threadIdx.y;
double locX = Rvector[myx];
Rvector += nx; // Advances this to the Y array for below
double locY;
if((coords == CYLINDRICAL) || (coords == RZCYLINDRICAL)) locY = 0.0;
double locRho, deltaphi;
double vdel, dener;
double vx0, vy0, vxS, vyS, vphi_combined;
int stageCount; double alpha, beta;
alpha = 1.0/6.0;
beta = 0.5;
for(; myy < ny; myy += SRCBLOCKY) {
// Only in square XY or XYZ coordinates must we account for a centripetal term in the 2-direction
if((coords == SQUARE) || (coords == RZSQUARE)) {
locY = Rvector[myy];
}
locRho = *fluidIn;
vx0 = fluidIn[2*pitch] / locRho; // convert to vr
vy0 = fluidIn[3*pitch] / locRho; // convert to vy/vphi
shar[tileaddr] = vxS = vx0;
shar[tileaddr+NTH] = vyS = vy0;
for(stageCount = 0; stageCount < 4; stageCount++) {
if((coords == SQUARE) || (coords == RZSQUARE)) {
// Rotating frame contribution, vx
vdel = DT*OMEGA*(OMEGA*locX + 2.0*vyS); // delta-vx
// Gravity gradient contribution, vx
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store predicted value for vx
shar[tileaddr+2*NTH] = vx0 + beta*vdel;
// Accumulate delta
shar[tileaddr] += alpha*vdel;
// rotating frame contribution, vy
vdel = -DT*OMEGA*(OMEGA*locY - 2*vxS);
// gravity gradient contribution, vy
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store predicted delta for vy
shar[tileaddr+3*NTH] = vy0 + beta*vdel;
// Accumulate delta
shar[tileaddr+NTH] += alpha*vdel;
} else {
// Rotating frame contribution + cylindrical contribution, vr
vphi_combined = OMEGA*locX + vyS;
vdel = DT*vphi_combined*vphi_combined / locX;
// Gravity gradient contribution, vr
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store predicted value for vr
shar[tileaddr+2*NTH] = vx0 + beta*vdel;
// Accumulate delta
shar[tileaddr] += alpha*vdel;
// rotating frame contribution, vtheta (evaluated at the current stage values)
vphi_combined = vyS + 2*locX*OMEGA;
vdel = -DT*vxS*vphi_combined / locX;
// gravity gradient contribution, vtheta
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store predicted value for vtheta
shar[tileaddr+3*NTH] = vy0 + beta*vdel;
// Accumulate delta into the vtheta accumulator
shar[tileaddr+NTH] += alpha*vdel;
}
__syncthreads();
vxS = shar[tileaddr + 2*NTH];
vyS = shar[tileaddr + 3*NTH];
__syncthreads();
switch(stageCount) {
case 0: alpha = 1.0/3.0; break;
case 1: beta = 1.0; break;
case 2: alpha = 1.0/6.0; break;
}
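/* This schedule walks the classic RK4 tableau: beta supplies the remaining stage
 * offsets (1/2, 1/2, then 1) while alpha cycles through the accumulation weights
 * 1/6, 1/3, 1/3, 1/6. */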
}
vphi_combined = fluidIn[4*pitch] / locRho; // vz...
dener = -(vx0*vx0+vy0*vy0+vphi_combined*vphi_combined);
deltaphi = gravgrad[2*pitch];
vphi_combined -= deltaphi;
// Download the final values from shmem
vxS = shar[tileaddr];
vyS = shar[tileaddr + NTH];
// Add the new XY/R-theta kinetic energy density
dener += vxS*vxS+vyS*vyS+vphi_combined*vphi_combined;
fluidIn[2*pitch] = vxS * locRho;
fluidIn[3*pitch] = vyS * locRho;
fluidIn[4*pitch] = vphi_combined * locRho;
// Change in total energy is exactly the work done by forces
fluidIn[pitch] += .5*locRho*dener;
// Hop pointers forward
fluidIn += nx*SRCBLOCKY;
gravgrad+= nx*SRCBLOCKY;
}
}
/* Simple kernel:
* Given in[0 ... (nodeN-1)], copies the segment in[partX0 ... (partX0 + partNX -1)] to out[0 ... (partNX-1)]
 * and linearly extrapolates addresses that fall outside [0, nodeN-1]
* invoke with gridDim.x * blockDim.x >= partNX
*/
__global__ void cukern_FetchPartitionSubset1DExtrap(double *in, int nodeN, double *out, int partX0, int partNX)
{
// calculate output address
int addrOut = threadIdx.x + blockDim.x * blockIdx.x;
if(addrOut >= partNX) return;
// Affine map back to input address
int addrIn = addrOut + partX0;
if(addrIn < 0) {
double delta = in[1]-in[0];
out[addrOut] = in[0]+delta*addrIn;
} else {
if(addrIn >= nodeN) {
double delta = in[1]-in[0];
out[addrOut] = in[nodeN-1] + (addrIn-nodeN+1)*delta;
} else {
out[addrOut] = in[addrIn];
}
}
}
/* Converts the fluid slab array from conservative
* [rho, Etotal, px, py, pz]
* variables to
* [rho, Einternal, vx, vy, vz]
* primitive variables which may be more suited for some computations. */
__global__ void cukern_cvtToPrimitiveVars(double *fluid, long partNumel, long pitch)
{
unsigned int globAddr = threadIdx.x + blockDim.x*blockIdx.x;
if(globAddr >= partNumel) return;
double rhoinv, p[3], Etot;
fluid += globAddr;
for(; globAddr < partNumel; globAddr += blockDim.x*gridDim.x) {
rhoinv = 1.0/fluid[0];
Etot = fluid[pitch];
p[0] = fluid[2*pitch];
p[1] = fluid[3*pitch];
p[2] = fluid[4*pitch];
fluid[2*pitch] = p[0]*rhoinv;
fluid[3*pitch] = p[1]*rhoinv;
fluid[4*pitch] = p[2]*rhoinv;
Etot -= .5*(p[0]*p[0]+p[1]*p[1]+p[2]*p[2])*rhoinv;
fluid[pitch] = Etot;
fluid += blockDim.x*gridDim.x;
}
}
/* Converts the fluid slab array from primitive
* [rho, Einternal, vx, vy, vz]
* variables to conservative
* [rho, Etotal, px, py, pz]
* variables which are mandatory for conservative flux differencing */
__global__ void cukern_cvtToConservativeVars(double *fluid, long partNumel, long pitch)
{
unsigned int globAddr = threadIdx.x + blockDim.x*blockIdx.x;
if(globAddr >= partNumel) return;
double rho, v[3], Eint;
fluid += globAddr;
for(; globAddr < partNumel; globAddr += blockDim.x*gridDim.x) {
rho = fluid[0];
Eint = fluid[pitch];
v[0] = fluid[2*pitch];
v[1] = fluid[3*pitch];
v[2] = fluid[4*pitch];
fluid[2*pitch] = v[0]*rho;
fluid[3*pitch] = v[1]*rho;
fluid[4*pitch] = v[2]*rho;
Eint += .5*(v[0]*v[0]+v[1]*v[1]+v[2]*v[2])*rho;
fluid[pitch] = Eint;
fluid += blockDim.x*gridDim.x;
}
}
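/* One-cell illustration (a hypothetical helper, not used by the solver) of why the two
 * conversions above are exact inverses: both add and subtract the same kinetic term,
 * Etot = Eint + (px^2 + py^2 + pz^2) / (2 rho). */
static inline void cvtOneCellSketch(double rho, double Etot, double px, double py, double pz,
		double *Eint, double *vx, double *vy, double *vz)
{
	double rhoinv = 1.0/rho;
	*vx = px*rhoinv;
	*vy = py*rhoinv;
	*vz = pz*rhoinv;
	*Eint = Etot - .5*(px*px + py*py + pz*pz)*rhoinv;
}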
|
903384e78415b21cabc487a4a57f9bc4db6d7647.cu
|
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#ifndef NOMATLAB
#include "mex.h"
#endif
// CUDA
#include "cuda.h"
#include "nvToolsExt.h"
#include "cudaCommon.h"
#include "cudaSourceScalarPotential.h"
#include "cudaGradientKernels.h"
#include "cudaTestSourceComposite.h"
#define SRCBLOCKX 16
#define SRCBLOCKY 16
__global__ void cukern_FetchPartitionSubset1DExtrap(double *in, int nodeN, double *out, int partX0, int partNX);
template <geometryType_t coords>
__global__ void cukern_sourceComposite_IMP(double *fluidIn, double *Rvector, double *gravgrad, long pitch);
template <geometryType_t coords>
__global__ void cukern_sourceComposite_RK4(double *fluidIn, double *Rvector, double *gravgrad, long pitch);
// This will probably be slow as balls but should provide a golden standard of accuracy
template <geometryType_t coords>
__global__ void cukern_sourceComposite_GL4(double *fluidIn, double *Rvector, double *gravgrad, long pitch);
template <geometryType_t coords>
__global__ void cukern_sourceComposite_GL6(double *fluidIn, double *Rvector, double *gravgrad, long pitch);
__constant__ __device__ double devLambda[12];
#define LAMX devLambda[0]
#define LAMY devLambda[1]
#define LAMZ devLambda[2]
// Define: F = -beta * rho * grad(phi)
// rho_g = density for full effect of gravity
// rho_c = minimum density to feel gravity at all
// beta = { rho_g < rho : 1 (NORMAL GRAVITY) }
// { rho_c < rho < rho_g : [(rho-rho_c)/(rho_g-rho_c)]^2 }
// { rho < rho_c : 0 }
// This provides a continuous (though not differentiable at rho = rho_g) way to suppress gravitation of the background fluid
// The original process of cutting gravity off below a critical density a few times the minimum
// density is believed to cause "blowups" at the inner edge of circular flow profiles due to being
// discontinuous. If even smoothness is insufficient and smooth differentiability is required,
// a more-times-continuous profile can be constructed, but let's not go there unless forced.
// Density below which we force gravity effects to zero
#define RHO_FULLG devLambda[3]
#define RHO_NOG devLambda[4]
// 1 / (rho_fullg - rho_nog)
#define G1 devLambda[5]
// rho_nog / (rho_fullg - rho_nog)
#define G2 devLambda[6]
#define RINNER devLambda[7]
#define DELTAR devLambda[8]
// __constant__ parameters for the rotating frame terms
#define OMEGA devLambda[9]
#define DT devLambda[10]
#define TWO_OMEGA_T devLambda[11]
__constant__ __device__ int devIntParams[3];
#ifdef STANDALONE_MEX_FUNCTION
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if ((nrhs!=5) || (nlhs != 0)) mexErrMsgTxt("Wrong number of arguments: need cudaTestSourceComposite(FluidManager, phi, GeometryManager, [rho_nog, rho_fullg, dt, spaceorder, timeorder], [xvector yvector])\n");
CHECK_CUDA_ERROR("entering cudaSourceRotatingFrame");
// Get source array info and create destination arrays
MGArray fluid[5], gravPot, xyvec;
/* FIXME: accept this as a matlab array instead
* FIXME: Transfer appropriate segments to __constant__ memory
* FIXME: that seems the only reasonable way to avoid partitioning hell
*/
double *scalars = mxGetPr(prhs[3]);
if(mxGetNumberOfElements(prhs[3]) != 5) {
PRINT_FAULT_HEADER;
printf("The 4th argument must be a five element vector: [rho_nog, rho_fullg, dt, space order, temporal order]. It contains %lui elements.\n", mxGetNumberOfElements(prhs[3]));
PRINT_FAULT_FOOTER;
DROP_MEX_ERROR("Invalid arguments, brah!");
}
double rhonog= scalars[0];
double rhofg = scalars[1];
double dt = scalars[2];
int spaceOrder = (int)scalars[3];
int timeOrder = (int)scalars[4];
GeometryParams geom = accessMatlabGeometryClass(prhs[2]);
int status;
status = MGA_accessMatlabArrays(prhs, 4, 4, &xyvec);
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access X-Y vector."); }
if(spaceOrder != 0) {
status = MGA_accessMatlabArrays(prhs, 1, 1, &gravPot);
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access gravity potential array."); }
}
dim3 gridsize, blocksize;
int numFluids = mxGetNumberOfElements(prhs[0]);
int fluidct;
// Allocate one buffer to be used if we have multiple fluids
MGArray tempSlab;
tempSlab.nGPUs = -1; // nonallocated marker
for(fluidct = 0; fluidct < numFluids; fluidct++) {
status = MGA_accessFluidCanister(prhs[0], fluidct, &fluid[0]);
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break;
mxArray *q = derefXatNdotAdotB(prhs[0], fluidct, "MINMASS", NULL);
double rhomin = *mxGetPr(q);
double rhonog= rhomin * 4; // FIXME this is a test hack
double rhofg = rhomin * 4.1;
status = sourcefunction_Composite(&fluid[0], &gravPot, &xyvec, geom, rhonog, rhofg, dt, spaceOrder, timeOrder, &tempSlab);
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to apply rotating frame source terms."); }
}
MGA_delete(&tempSlab);
}
#endif
int sourcefunction_Composite(MGArray *fluid, MGArray *phi, MGArray *XYVectors, GeometryParams geom, double rhoNoG, double rhoFullGravity, double dt, int spaceOrder, int timeOrder, MGArray *storageBuffer)
{
#ifdef USE_NVTX
nvtxRangePush(__FUNCTION__);
#endif
dim3 gridsize, blocksize;
int3 arraysize;
double lambda[11];
int i;
int worked;
double *devXYset[fluid->nGPUs];
int sub[6];
double *dx = &geom.h[0];
lambda[3] = rhoFullGravity;
lambda[4] = rhoNoG;
lambda[5] = 1.0/(rhoFullGravity - rhoNoG);
lambda[6] = rhoNoG/(rhoFullGravity - rhoNoG);
lambda[7] = geom.Rinner; // This is actually overwritten per partition below
lambda[8] = dx[1];
lambda[9] = geom.frameOmega;
lambda[10]= dt;
//int isThreeD = (fluid->dim[2] > 1);
int isRZ = (fluid->dim[2] > 1) & (fluid->dim[1] == 1);
MGArray gradslab;
gradslab.nGPUs = -1;
int usingLocalStorage = 0;
// if we get no buffer then allocate local storage
if(storageBuffer == NULL) {
usingLocalStorage = 1;
storageBuffer = &gradslab;
}
if(storageBuffer->nGPUs == -1) { // need to allocate it
#ifdef USE_NVTX
nvtxMark("cudaTestSourceComposite.cu:182 large malloc 3 slabs");
#endif
worked = MGA_allocSlab(phi, storageBuffer, 3);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
}
MGArray *gs = storageBuffer;
worked = computeCentralGradient(phi, gs, geom, spaceOrder, dt);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
for(i = 0; i < fluid->nGPUs; i++) {
cudaSetDevice(fluid->deviceID[i]);
calcPartitionExtent(fluid, i, &sub[0]);
lambda[7] = geom.Rinner + dx[0] * sub[0]; // Innermost cell coord may change per-partition
cudaMemcpyToSymbol((const void *)devLambda, lambda, 11*sizeof(double), 0, cudaMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("cudaMemcpyToSymbol");
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break;
cudaMemcpyToSymbol((const void *)devIntParams, &sub[3], 3*sizeof(int), 0, cudaMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("memcpy to symbol");
if(worked != SUCCESSFUL) break;
cudaMalloc((void **)&devXYset[i], (sub[3]+sub[4])*sizeof(double));
worked = CHECK_CUDA_ERROR("malloc devXYset");
if(worked != SUCCESSFUL) break;
}
if(worked != SUCCESSFUL) return worked;
double *fpi;
// Iterate over all partitions, and here we GO!
for(i = 0; i < fluid->nGPUs; i++) {
cudaSetDevice(fluid->deviceID[i]);
worked = CHECK_CUDA_ERROR("cudaSetDevice");
if(worked != SUCCESSFUL) break;
calcPartitionExtent(fluid, i, sub);
arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5];
fpi = fluid->devicePtr[i]; // save some readability below...
// This section extracts the portions of the supplied partition-cloned [X;Y] vector relevant to the current partition
blocksize = makeDim3(128, 1, 1);
gridsize.x = ROUNDUPTO(sub[3], 128) / 128;
gridsize.y = gridsize.z = 1;
cukern_FetchPartitionSubset1DExtrap<<<gridsize, blocksize>>>(XYVectors->devicePtr[i], fluid->dim[0], devXYset[i], sub[0], sub[3]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, XYVectors, i, "cukern_FetchPartitionSubset1DExtrap, X");
if(worked != SUCCESSFUL) break;
gridsize.x = ROUNDUPTO(sub[4], 128) / 128;
cukern_FetchPartitionSubset1DExtrap<<<gridsize, blocksize>>>(XYVectors->devicePtr[i] + fluid->dim[0], fluid->dim[1], devXYset[i]+sub[3], sub[1], sub[4]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, XYVectors, i, "cukern_FetchPartitionSubset1DExtrap, Y");
if(worked != SUCCESSFUL) break;
// Prepare to launch the solver itself!
arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5];
blocksize = makeDim3(SRCBLOCKX, SRCBLOCKY, 1);
gridsize.x = ROUNDUPTO(arraysize.x, blocksize.x) / blocksize.x;
gridsize.y = (isRZ) ? 1 : arraysize.z;
gridsize.z = 1;
switch(timeOrder) {
case 2:
if(isRZ) {
if(geom.shape == SQUARE) {
cukern_sourceComposite_IMP<RZSQUARE><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
cukern_sourceComposite_IMP<RZCYLINDRICAL><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
} else {
if(geom.shape == SQUARE) {
cukern_sourceComposite_IMP<SQUARE><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
cukern_sourceComposite_IMP<CYLINDRICAL><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
}
break;
case 4:
if(isRZ) {
if(geom.shape == SQUARE) {
worked = ERROR_NOIMPLEMENT; break;
//cukern_sourceComposite_GL4<RZSQUARE><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
cukern_sourceComposite_GL4<RZCYLINDRICAL><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
} else {
if(geom.shape == SQUARE) {
worked = ERROR_NOIMPLEMENT; break;
//cukern_sourceComposite_GL4<SQUARE><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
cukern_sourceComposite_GL4<CYLINDRICAL><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
}
break;
case 6:
if(isRZ) {
if(geom.shape == SQUARE) {
worked = ERROR_NOIMPLEMENT; break;
//cukern_sourceComposite_GL6<RZSQUARE><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
cukern_sourceComposite_GL6<RZCYLINDRICAL><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
} else {
if(geom.shape == SQUARE) {
worked = ERROR_NOIMPLEMENT; break;
//cukern_sourceComposite_GL6<SQUARE><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
} else {
cukern_sourceComposite_GL6<CYLINDRICAL><<<gridsize, blocksize>>>(fpi, devXYset[i], gs->devicePtr[i], fluid->slabPitch[i]/8);
}
}
break;
default:
PRINT_FAULT_HEADER;
printf("Source function requires a temporal order of 2 (implicit midpt), 4 (Gauss-Legendre 4th order) or 6 (GL-6th): Received %i\n", timeOrder);
PRINT_FAULT_FOOTER;
break;
}
if(worked != SUCCESSFUL) break;
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "cukernSourceComposite");
if(worked != SUCCESSFUL) break;
}
if(worked != SUCCESSFUL) return worked;
worked = MGA_exchangeLocalHalos(fluid, 5);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
int j; // This will halt at the stage failed upon if CUDA barfed above
#ifdef USE_NVTX
nvtxMark("Freeing devXYset");
#endif
for(j = 0; j < i; j++) {
cudaFree((void *)devXYset[j]);
}
if(usingLocalStorage) {
#ifdef USE_NVTX
nvtxMark("cudaTestSourceComposite.cu:323 large free");
#endif
MGA_delete(gs);
}
// Don't bother checking cudaFree if we already have an error caused above, it was just trying to clean up the barf
if(worked == SUCCESSFUL)
worked = CHECK_CUDA_ERROR("cudaFree");
#ifdef USE_NVTX
nvtxRangePop();
#endif
return CHECK_IMOGEN_ERROR(worked);
}
/* The equations of motion for a rotating frame:
*
* a = -[2 w X v + w X (w X r) ]
* dv = -[2 w X v + w X (w X r) ] dt
* dp = -rho dv = -rho [[2 w X v + w X (w X r) ] dt
* dp = -[2 w X p + rho w X (w X r) ] dt
*
* w X p = |I J K | = <-w py, w px, 0> = u
* |0 0 w |
* |px py pz|
*
* w X r = <-w y, w x, 0> = s;
* w X s = |I J K| = <-w^2 x, -w^2 y, 0> = -w^2<x,y,0> = b
* |0 0 w|
* |-wy wx 0|
* dp = -[2 u + rho b] dt
* = -[2 w<-py, px, 0> - rho w^2 <x, y, 0>] dt
* = w dt [2<py, -px> + rho w <x, y>] in going to static frame
*
* dE = -v dot dp
*/
/* rho, E, Px, Py, Pz: arraysize-sized arrays
omega: scalar
Rx: [nx 1 1] sized array
Ry: [ny 1 1] sized array */
#define JACOBI_ITER_MAX 3
#define NTH (SRCBLOCKX*SRCBLOCKY)
/* Solves the combined equations of a rotating frame and gravity,
*
* d/dt[ px ] = - rho (2 w X v + w X (w X r)).xhat - rho dphi/dx
* [ py ] = - rho (2 w X v + w X (w X r)).yhat - rho dphi/dy
* [ pz ] = - rho (2 w X v + w X (w X r)).zhat - rho dphi/dz
* [ E ] = p.dp/2
*
* in either SQUARE or CYLINDRICAL coordinates using the implicit midpoint method,
*
* y_half = y_0 + .5 dt f(y_half);
* y_1 = y_0 + dt f(y_half);
*
* The implicit equations are iterated using JACOBI_ITER_MAX Jacobi steps updating vx then vy.
* Frame rotation is always in the z-hat direction so no nonlinearity appears in the z direction.
*/
template <geometryType_t coords>
__global__ void cukern_sourceComposite_IMP(double *fluidIn, double *Rvector, double *gravgrad, long pitch)
{
__shared__ double shar[4*SRCBLOCKX*SRCBLOCKY];
//__shared__ double px0[SRCBLOCKX*SRCBLOCKY], py0[SRCBLOCKX*SRCBLOCKY];
/* strategy: XY files, fill in X direction, step in Y direction; griddim.y = Nz */
int myx = threadIdx.x + SRCBLOCKX*blockIdx.x;
int myy = threadIdx.y;
int myz = blockIdx.y;
int nx = devIntParams[0];
int ny;
if((coords == SQUARE) || (coords == CYLINDRICAL)) { // Not RZ coords
ny = devIntParams[1];
} else {
ny = devIntParams[2];
}
if(myx >= devIntParams[0]) return; // return if x >= nx
// Compute global index at the start
int tileaddr = myx + nx*(myy + ny*myz);
fluidIn += tileaddr;
gravgrad += tileaddr;
tileaddr = threadIdx.x + SRCBLOCKX*threadIdx.y;
double locX = Rvector[myx];
Rvector += nx; // Advances this to the Y array for below
double locY;
if((coords == CYLINDRICAL) || (coords == RZCYLINDRICAL)) locY = 0.0;
double locRho, deltaphi;
double vdel, dener;
double vx0, vy0, vz0, vphi_combined;
int jacobiIters;
for(; myy < ny; myy += SRCBLOCKY) {
// Only in square XY or XYZ coordinates must we account for a centripetal term in the 2-direction
if((coords == SQUARE) || (coords == RZSQUARE)) {
locY = Rvector[myy];
}
locRho = *fluidIn;
vx0 = fluidIn[2*pitch] / locRho; // convert to vr
vy0 = fluidIn[3*pitch] / locRho; // convert to vy/vphi
vz0 = fluidIn[4*pitch] / locRho;
shar[tileaddr] = vx0;
shar[tileaddr+NTH] = vy0;
// Repeatedly perform fixed point iterations to solve the combined time differential operators
// This yields the implicit Euler value for the midpoint (t = 0.5) if successful
for(jacobiIters = 0; jacobiIters < JACOBI_ITER_MAX; jacobiIters++) {
if((coords == SQUARE) || (coords == RZSQUARE)) {
// Rotating frame contribution, vx
vdel = DT*OMEGA*(OMEGA*locX + 2.0*shar[tileaddr+NTH]); // delta-vx
// Gravity gradient contribution, vx
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store predicted value for vx
shar[tileaddr+2*NTH] = vx0 + .5*vdel;
// rotating frame contribution, vy
vdel = -DT*OMEGA*(OMEGA*locY - 2*shar[tileaddr]);
// gravity gradient contribution, vy
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store predicted delta for vy
shar[tileaddr+3*NTH] = vy0 + .5*vdel;
} else {
// Rotating frame contribution + cylindrical contribution, pr
vphi_combined = OMEGA*locX + shar[tileaddr+NTH];
vdel = DT*vphi_combined*vphi_combined / locX; // a = (vphi + r*W)^2 / r
// Gravity gradient contribution, pr
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store predicted value for pr
shar[tileaddr+2*NTH] = vx0 + .5*vdel;
// rotating frame contribution, ptheta
vphi_combined = shar[tileaddr+NTH] + 2*locX*OMEGA; // a = -vr vphi - 2 vr w
vdel = -DT*shar[tileaddr]*vphi_combined / locX;
// gravity gradient contribution, ptheta
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store predicted delta for ptheta
shar[tileaddr+3*NTH] = vy0 + .5*vdel;
}
__syncthreads();
shar[tileaddr] = shar[tileaddr+2*NTH];
shar[tileaddr+NTH] = shar[tileaddr+3*NTH];
__syncthreads();
}
// Compute minus the original XY/R-theta kinetic energy density
dener = -(vx0*vx0+vy0*vy0+vz0*vz0);
if((coords == SQUARE) || (coords == RZSQUARE)) {
// Rotating frame contribution, vx
vdel = DT*OMEGA*(OMEGA*locX + 2.0*shar[tileaddr+2*NTH]); // delta-vx
// Gravity gradient contribution, vx
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store value for vx
vx0 += vdel;
// rotating frame contribution, vy
vdel = -DT*OMEGA*(OMEGA*locY - 2*shar[tileaddr+NTH]);
// gravity gradient contribution, vy
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store delta for vy
vy0 += vdel;
} else {
// Rotating frame contribution + cylindrical contribution, pr
vphi_combined = OMEGA*locX + shar[tileaddr+NTH];
vdel = DT*vphi_combined*vphi_combined/locX;
// Gravity gradient contribution, pr
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store predicted value for pr
vx0 += vdel;
// rotating frame contribution, ptheta
vphi_combined = shar[tileaddr+NTH] + 2*locX*OMEGA;
vdel = -DT*shar[tileaddr]*vphi_combined/locX;
// gravity gradient contribution, ptheta
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store predicted delta for ptheta
vy0 += vdel;
}
// Only a linear force in the Z direction: No need to iterate: Exact solution available
deltaphi = gravgrad[2*pitch];
vz0 -= deltaphi;
// Add the new XY/R-theta kinetic energy density
dener += vx0*vx0+vy0*vy0+vz0*vz0;
fluidIn[2*pitch] = vx0 * locRho;
fluidIn[3*pitch] = vy0 * locRho;
fluidIn[4*pitch] = vz0 * locRho;
// Change in total energy is exactly the work done by forces
fluidIn[pitch] += .5*locRho*dener;
// Hop pointers forward
fluidIn += nx*SRCBLOCKY;
gravgrad+= nx*SRCBLOCKY;
}
}
/* These coefficients define the Butcher tableau of the
* 4th order Gauss-Legendre quadrature method */
#define GL4_C1 0.2113248654051871344705659794271924
#define GL4_C2 0.7886751345948128655294340205728076
#define GL4_A11 .25
#define GL4_A12 -0.03867513459481286552943402057280764
#define GL4_A21 0.5386751345948128655294340205728076
#define GL4_A22 .25
/* Solves the combined equations of a rotating frame and gravity in either
* SQUARE or CYLINDRICAL coordinates using 4th order Gauss-Legendre quadrature.
* This requires simultaneous solution of 2N equations at 2 intermediate points,
* for N=2 (vx and vy) followed by evaluation of the output sum.
*
* The implicit solve makes a forward Euler starter prediction before
* applying Jacobi iterations to update in the order
* vx1, vy1, vx2, vy2
 * for up to JACOBI_ITER_MAX times.
*/
template <geometryType_t coords>
__global__ void cukern_sourceComposite_GL4(double *fluidIn, double *Rvector, double *gravgrad, long pitch)
{
__shared__ double shar[4*SRCBLOCKX*SRCBLOCKY];
/* strategy: XY files, fill in X direction, step in Y direction; griddim.y = Nz */
int myx = threadIdx.x + SRCBLOCKX*blockIdx.x;
int myy = threadIdx.y;
int myz = blockIdx.y;
int nx = devIntParams[0];
int ny;
if((coords == SQUARE) || (coords == CYLINDRICAL)) { // Not RZ coords
ny = devIntParams[1];
} else {
ny = devIntParams[2];
}
if(myx >= devIntParams[0]) return; // return if x >= nx
// Compute global index at the start
int tileaddr = myx + nx*(myy + ny*myz);
fluidIn += tileaddr;
gravgrad += tileaddr;
tileaddr = threadIdx.x + SRCBLOCKX*threadIdx.y;
double locX = Rvector[myx];
Rvector += nx; // Advances this to the Y array for below
double locY;
if((coords == CYLINDRICAL) || (coords == RZCYLINDRICAL)) locY = 0.0;
double locRho, deltaphi;
double vdel, dener;
double vxA, vxB, vyA, vyB;
double q1, q2; // temp vars?
int jacobiIters;
for(; myy < ny; myy += SRCBLOCKY) {
// Only in square XY or XYZ coordinates must we account for a centripetal term in the 2-direction
if((coords == SQUARE) || (coords == RZSQUARE)) {
locY = Rvector[myy];
}
locRho = *fluidIn;
vxA = fluidIn[2*pitch] / locRho; // convert to vr
vyA = fluidIn[3*pitch] / locRho; // convert to vy/vphi
shar[tileaddr] = vxA;
shar[tileaddr+NTH] = vyA;
// Generate a 1st order prediction for what the values will be using fwd euler
// This is worth roughly 1 iteration but as can be seen will take way less time
if((coords == SQUARE) || (coords == RZSQUARE)) {
q1 = DT*OMEGA*(OMEGA*locX + 2.0*vyA) - gravgrad[0]; // delta-vx
q2 = -DT*OMEGA*(OMEGA*locY - 2*vxA) - gravgrad[pitch]; // delta-vy
vxB = vxA + GL4_C2 * q1;
vyB = vyA + GL4_C2 * q2;
vxA += GL4_C1 * q1;
vyA += GL4_C1 * q2;
} else {
q1 = OMEGA*locX + vyA;
q2 = -vxA*(vyA + 2*OMEGA*locX);
deltaphi = gravgrad[0];
vxB = vxA + GL4_C2*(DT*q1*q1/locX - deltaphi);
vxA += GL4_C1*(DT*q1*q1/locX - deltaphi);
deltaphi = gravgrad[pitch];
vyB = vyA + GL4_C2*(DT*q2/locX - deltaphi);
vyA += GL4_C1*(DT*q2/locX - deltaphi);
}
// Repeatedly perform fixed point iterations to solve the combined differential operators
for(jacobiIters = 0; jacobiIters < JACOBI_ITER_MAX; jacobiIters++) {
if((coords == SQUARE) || (coords == RZSQUARE)) {
/////////////////
/////////////// ruh-roh: SQUARE / RZSQUARE stage update not implemented; the host
/////////////// switch returns ERROR_NOIMPLEMENT before launching this instantiation
/////////////////
} else {
// Rotating frame contribution + cylindrical contribution, vr, step A
q1 = OMEGA*locX + vyA;
q2 = OMEGA*locX + vyB;
// Gravity gradient contribution, vr, step A
deltaphi = gravgrad[0];
// Improve estimates for radial velocity
vdel = -GL4_C1*deltaphi + DT*(q1*q1*GL4_A11 + q2*q2*GL4_A12)/locX;
vxA = shar[tileaddr] + vdel;
vdel = -GL4_C2*deltaphi + DT*(q1*q1*GL4_A21 + q2*q2*GL4_A22)/locX;
vxB = shar[tileaddr] + vdel;
// Load azimuthal gravity gradient
deltaphi = gravgrad[pitch];
q2 = vxB*(vyB+2*locX*OMEGA);
// Note we leave the GL quadrature coefficient off and can reuse q2
//vdel = -DT*(GL4_A11*2*locX*OMEGA*vxA+GL4_A12*q2)/locX - GL4_C1 * deltaphi;
//vyA = (shar[tileaddr + NTH] + vdel)/(1+DT*GL4_A11*vxA/locX)
vdel = -DT*(GL4_A11*2*locX*OMEGA*vxA+GL4_A12*q2) - GL4_C1 * deltaphi * locX;
vyA = (shar[tileaddr + NTH]*locX + vdel)/(locX+DT*GL4_A11*vxA);
q1 = vxA*(vyA+2*locX*OMEGA);
vdel = -DT*(GL4_A21*q1+GL4_A22*2*locX*OMEGA*vxB) - GL4_C2 * deltaphi * locX;
vyB = (shar[tileaddr+NTH]*locX + vdel)/(locX + DT*GL4_A22*vxB);
}
}
// Compute minus the original kinetic energy density
q1 = shar[tileaddr];
q2 = shar[tileaddr+NTH];
dener = -(q1*q1+q2*q2);
q1 = fluidIn[4*pitch] / locRho;
dener -= q1*q1;
if((coords == SQUARE) || (coords == RZSQUARE)) {
///////////////
//////////// ruh-roh: SQUARE / RZSQUARE final update not implemented; the host
//////////// switch returns ERROR_NOIMPLEMENT before launching this instantiation
/////////////
} else {
// evaluate final Vr
q1 = OMEGA*locX + vyA;
q2 = OMEGA*locX + vyB;
deltaphi = gravgrad[0];
shar[tileaddr] = shar[tileaddr] - deltaphi + .5*DT*(q1*q1+q2*q2)/locX;
// evaluate final Vphi
deltaphi = gravgrad[pitch];
shar[tileaddr+NTH] = shar[tileaddr+NTH] - deltaphi - .5*DT*(vxA*(vyA+2*OMEGA*locX)+vxB*(vyB+2*OMEGA*locX))/locX;
}
vxA = shar[tileaddr];
vyA = shar[tileaddr+NTH];
// Only a linear force in the Z direction: No need to iterate: Exact solution available
deltaphi = gravgrad[2*pitch];
q1 = fluidIn[4*pitch] / locRho - deltaphi;
// Add the new XY/R-theta kinetic energy density
dener += (vxA*vxA + vyA*vyA + q1*q1);
fluidIn[2*pitch] = vxA * locRho;
fluidIn[3*pitch] = vyA * locRho;
fluidIn[4*pitch] = q1 * locRho;
// Change in total energy is exactly the work done by forces
fluidIn[pitch] += .5*locRho*dener;
// Hop pointers forward
fluidIn += nx*SRCBLOCKY;
gravgrad+= nx*SRCBLOCKY;
}
}
#define GL6_C1 0.28918148932210804453
#define GL6_C2 .5
#define GL6_C3 0.71081851067789195547
#define GL6_A11 0.13888888888888888889
#define GL6_A21 0.30026319498086459244
#define GL6_A31 0.26798833376246945173
#define GL6_A12 -0.035976667524938903456
#define GL6_A22 0.22222222222222222222
#define GL6_A32 0.4804211119693833479
#define GL6_A13 0.0097894440153083260496
#define GL6_A23 -0.02248541720308681466
#define GL6_A33 0.13888888888888888889
#define GL6_B1 0.27777777777777777778
#define GL6_B2 0.44444444444444444444
#define GL6_B3 0.27777777777777777778
/* Solves the combined equations of a rotating frame and gravity
* in either SQUARE or CYLINDRICAL coordinates using 6th order
* Gauss-Legendre quadrature: This requires simultaneous self-consistent
* solution of 3N equations at 3 intermediate points, for N=2 (vx and vy)
* followed by evaluation of the output sum.
*
* The implicit solve makes a forward Euler starter prediction before
* applying Jacobi iterations to update in the order
* vx1, vx2, vx3, vy1, vy2, vy3
 * for up to JACOBI_ITER_MAX times.
*/
template <geometryType_t coords>
__global__ void cukern_sourceComposite_GL6(double *fluidIn, double *Rvector, double *gravgrad, long pitch)
{
__shared__ double shar[6*SRCBLOCKX*SRCBLOCKY];
//__shared__ double px0[SRCBLOCKX*SRCBLOCKY], py0[SRCBLOCKX*SRCBLOCKY];
// strategy: XY files, fill in X direction, step in Y direction; griddim.y = Nz
int myx = threadIdx.x + SRCBLOCKX*blockIdx.x;
int myy = threadIdx.y;
int myz = blockIdx.y;
int nx = devIntParams[0];
int ny;
if((coords == SQUARE) || (coords == CYLINDRICAL)) { // Not RZ coords
ny = devIntParams[1];
} else {
ny = devIntParams[2];
}
if(myx >= devIntParams[0]) return; // return if x >= nx
// Compute global index at the start
int tileaddr = myx + nx*(myy + ny*myz);
fluidIn += tileaddr;
gravgrad += tileaddr;
tileaddr = threadIdx.x + SRCBLOCKX*threadIdx.y;
double locX = Rvector[myx];
Rvector += nx; // Advances this to the Y array for below
double locY;
if((coords == CYLINDRICAL) || (coords == RZCYLINDRICAL)) locY = 0.0;
double locRho, deltaphi;
double vdel, dener;
double vxA, vxB, vxC, vyA, vyB, vyC;
double q1, q2, q3; // temp vars?
int jacobiIters;
for(; myy < ny; myy += SRCBLOCKY) {
// Only in square XY or XYZ coordinates must we account for a centripetal term in the 2-direction
if((coords == SQUARE) || (coords == RZSQUARE)) {
locY = Rvector[myy];
}
locRho = *fluidIn;
vxA = fluidIn[2*pitch] / locRho; // convert to vr
vyA = fluidIn[3*pitch] / locRho; // convert to vy/vphi
shar[tileaddr] = vxA;
shar[tileaddr+NTH] = vyA;
// Generate a 1st order prediction for what the values will be using fwd euler
// This is worth roughly 1 iteration but as can be seen will take way less time
if((coords == SQUARE) || (coords == RZSQUARE)) {
/////
/////
/////
} else {
q1 = OMEGA*locX + vyA;
q2 = -vxA*(vyA + 2*OMEGA*locX);
deltaphi = gravgrad[0];
vxC = vxA + GL6_C3*(DT*q1*q1/locX - deltaphi);
vxB = vxA + GL6_C2*(DT*q1*q1/locX - deltaphi);
vxA += GL6_C1*(DT*q1*q1/locX - deltaphi);
deltaphi = gravgrad[pitch];
vyC = vyA + GL6_C3*(DT*q2/locX - deltaphi);
vyB = vyA + GL6_C2*(DT*q2/locX - deltaphi);
vyA += GL6_C1*(DT*q2/locX - deltaphi);
}
// Repeatedly perform fixed point iterations to solve the combined time differential operators
// This yields the implicit Euler value for the midpoint (t = 0.5) if successful
for(jacobiIters = 0; jacobiIters < JACOBI_ITER_MAX; jacobiIters++) {
if((coords == SQUARE) || (coords == RZSQUARE)) {
///////////////
/////////////// ruh-roh
///////////////
} else {
// Rotating frame contribution + cylindrical contribution, Vr:
// Depends only on Vtheta... improve all estimates for Vr now:
q1 = OMEGA*locX + vyA;
q2 = OMEGA*locX + vyB;
q3 = OMEGA*locX + vyC;
// Gravity gradient contribution, vr
deltaphi = gravgrad[0];
vdel = -GL6_C1*deltaphi + DT*(q1*q1*GL6_A11 + q2*q2*GL6_A12+q3*q3*GL6_A13)/locX;
vxA = shar[tileaddr] + vdel;
vdel = -GL6_C2*deltaphi + DT*(q1*q1*GL6_A21 + q2*q2*GL6_A22+q3*q3*GL6_A23)/locX;
vxB = shar[tileaddr] + vdel;
vdel = -GL6_C3*deltaphi + DT*(q1*q1*GL6_A31 + q2*q2*GL6_A32+q3*q3*GL6_A33)/locX;
vxC = shar[tileaddr] + vdel;
// gravity gradient contribution, vtheta
deltaphi = gravgrad[pitch];
// rotating frame contribution, vtheta
q1 = vxA*(vyA+2*locX*OMEGA);
q2 = vxB*(vyB+2*locX*OMEGA);
q3 = vxC*(vyC+2*locX*OMEGA);
vdel = -DT*(GL6_A11*q1 + GL6_A12*q2+GL6_A13*q3)/locX - GL6_C1 * deltaphi;
vyA = shar[tileaddr+NTH] + vdel;
// update q1 & improve vyB
q1 = vxA*(vyA+2*locX*OMEGA);
vdel = -DT*(GL6_A21*q1 + GL6_A22*q2+GL6_A23*q3)/locX - GL6_C2 * deltaphi;
vyB = shar[tileaddr+NTH] + vdel;
// update q2 & improve vyC
q2 = vxB*(vyB+2*locX*OMEGA);
vdel = -DT*(GL6_A31*q1 + GL6_A32*q2+GL6_A33*q3)/locX - GL6_C3 * deltaphi;
vyC = shar[tileaddr+NTH] + vdel;
}
}
// Compute minus the original kinetic energy density
q1 = shar[tileaddr];
q2 = shar[tileaddr+NTH];
dener = -(q1*q1+q2*q2);
q1 = fluidIn[4*pitch] / locRho;
dener -= q1*q1;
if((coords == SQUARE) || (coords == RZSQUARE)) {
///////////////
//////////// ruh-roh
/////////////
} else {
// evaluate final Vr
q1 = OMEGA*locX + vyA;
q2 = OMEGA*locX + vyB;
q3 = OMEGA*locX + vyC;
deltaphi = gravgrad[0];
shar[tileaddr] = shar[tileaddr] - deltaphi + DT*(GL6_B1*q1*q1 + GL6_B2*q2*q2 + GL6_B3*q3*q3)/locX;
// evaluate final Vphi
q1 = vxA*(vyA+2*OMEGA*locX);
q2 = vxB*(vyB+2*OMEGA*locX);
q3 = vxC*(vyC+2*OMEGA*locX);
deltaphi = gravgrad[pitch];
shar[tileaddr+NTH] = shar[tileaddr+NTH] - deltaphi - DT*(GL6_B1*q1 + GL6_B2*q2 + GL6_B3*q3)/locX;
}
vxA = shar[tileaddr];
vyA = shar[tileaddr+NTH];
// Only a linear force in the Z direction: No need to iterate: Exact solution available
deltaphi = gravgrad[2*pitch];
q1 = fluidIn[4*pitch] / locRho - deltaphi;
// Add the new XY/R-theta kinetic energy density
dener += (vxA*vxA + vyA*vyA + q1*q1);
fluidIn[2*pitch] = vxA * locRho;
fluidIn[3*pitch] = vyA * locRho;
fluidIn[4*pitch] = q1 * locRho;
// Change in total energy is exactly the work done by forces
fluidIn[pitch] += .5*locRho*dener;
// Hop pointers forward
fluidIn += nx*SRCBLOCKY;
gravgrad+= nx*SRCBLOCKY;
}
}
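/* Illustrative sketch of the iteration above (notation assumed): writing a(v) for the
 * rotating-frame plus centripetal acceleration, each Jacobi sweep refreshes the three
 * stage velocities as
 *   v_k = v0 + DT * ( A_k1*a(v_1) + A_k2*a(v_2) + A_k3*a(v_3) ) - C_k * deltaphi,   k = 1..3
 * using the GL6_A** entries and the GL6_C* weights on the gravity gradient; the final
 * update combines the converged stage accelerations with the quadrature weights
 * GL6_B1..GL6_B3. */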
/* Solves the combined equations of a rotating frame and gravity
* in either SQUARE or CYLINDRICAL coordinates using the well-known
* 4th order explicit multistage method of Runge & Kutta.
*/
template <geometryType_t coords>
__global__ void cukern_sourceComposite_RK4(double *fluidIn, double *Rvector, double *gravgrad, long pitch)
{
__shared__ double shar[4*SRCBLOCKX*SRCBLOCKY];
//__shared__ double px0[SRCBLOCKX*SRCBLOCKY], py0[SRCBLOCKX*SRCBLOCKY];
/* strategy: XY files, fill in X direction, step in Y direction; griddim.y = Nz */
int myx = threadIdx.x + SRCBLOCKX*blockIdx.x;
int myy = threadIdx.y;
int myz = blockIdx.y;
int nx = devIntParams[0];
int ny;
if((coords == SQUARE) || (coords == CYLINDRICAL)) { // Not RZ coords
ny = devIntParams[1];
} else {
ny = devIntParams[2];
}
if(myx >= devIntParams[0]) return; // return if x >= nx
// Compute global index at the start
int tileaddr = myx + nx*(myy + ny*myz);
fluidIn += tileaddr;
gravgrad += tileaddr;
tileaddr = threadIdx.x + SRCBLOCKX*threadIdx.y;
double locX = Rvector[myx];
Rvector += nx; // Advances this to the Y array for below
double locY;
if((coords == CYLINDRICAL) || (coords == RZCYLINDRICAL)) locY = 0.0;
double locRho, deltaphi;
double vdel, dener;
double vx0, vy0, vxS, vyS, vphi_combined;
int stageCount; double alpha, beta;
alpha = 1.0/6.0;
beta = 0.5;
for(; myy < ny; myy += SRCBLOCKY) {
// Only in square XY or XYZ coordinates must we account for a centripetal term in the 2-direction
if((coords == SQUARE) || (coords == RZSQUARE)) {
locY = Rvector[myy];
}
locRho = *fluidIn;
vx0 = fluidIn[2*pitch] / locRho; // convert to vr
vy0 = fluidIn[3*pitch] / locRho; // convert to vy/vphi
shar[tileaddr] = vxS = vx0;
shar[tileaddr+NTH] = vyS = vy0;
for(stageCount = 0; stageCount < 4; stageCount++) {
if((coords == SQUARE) || (coords == RZSQUARE)) {
// Rotating frame contribution, vx
vdel = DT*OMEGA*(OMEGA*locX + 2.0*vyS); // delta-vx
// Gravity gradient contribution, vx
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store predicted value for vx
shar[tileaddr+2*NTH] = vx0 + beta*vdel;
// Accumulate delta
shar[tileaddr] += alpha*vdel;
// rotating frame contribution, vy
vdel = -DT*OMEGA*(OMEGA*locY - 2*vxS);
// gravity gradient contribution, vy
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store predicted delta for vy
shar[tileaddr+3*NTH] = vy0 + beta*vdel;
// Accumulate delta
shar[tileaddr+NTH] += alpha*vdel;
} else {
// Rotating frame contribution + cylindrical contribution, pr
vphi_combined = OMEGA*locX + vyS;
vdel = DT*vphi_combined*vphi_combined / locX;
// Gravity gradient contribution, pr
deltaphi = gravgrad[0];
vdel -= deltaphi;
// store predicted value for vr
shar[tileaddr+2*NTH] = vx0 + beta*vdel;
// Accumulate delta
shar[tileaddr] += alpha*vdel;
// rotating frame contribution, ptheta
vphi_combined = vyS + 2*locX*OMEGA;
vdel = -DT*vxS*vphi_combined / locX;
// gravity gradient contribution, ptheta
deltaphi = gravgrad[pitch];
vdel -= deltaphi;
// store predicted delta for vtheta
shar[tileaddr+3*NTH] = vy0 + beta*vdel;
// Accumulate delta
shar[tileaddr+NTH] += alpha*vdel;
}
__syncthreads();
vxS = shar[tileaddr + 2*NTH];
vyS = shar[tileaddr + 3*NTH];
__syncthreads();
switch(stageCount) {
case 0: alpha = 1.0/3.0; break;
case 1: beta = 1.0; break;
case 2: alpha = 1.0/6.0; break;
}
}
vphi_combined = fluidIn[4*pitch] / locRho; // vz...
dener = -(vx0*vx0+vy0*vy0+vphi_combined*vphi_combined);
deltaphi = gravgrad[2*pitch];
vphi_combined -= deltaphi;
// Download the final values from shmem
vxS = shar[tileaddr];
vyS = shar[tileaddr + NTH];
// Add the new XY/R-theta kinetic energy density
dener += vxS*vxS+vyS*vyS+vphi_combined*vphi_combined;
fluidIn[2*pitch] = vxS * locRho;
fluidIn[3*pitch] = vyS * locRho;
fluidIn[4*pitch] = vphi_combined * locRho;
// Change in total energy is exactly the work done by forces
fluidIn[pitch] += .5*locRho*dener;
// Hop pointers forward
fluidIn += nx*SRCBLOCKY;
gravgrad+= nx*SRCBLOCKY;
}
}
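/* Illustrative note: the alpha/beta schedule above reproduces the classical RK4 tableau.
 * The shar[] accumulators gather alpha-weighted stage deltas with
 * alpha = (1/6, 1/3, 1/3, 1/6), while the predictor states fed into the next stage are
 * formed as vx0 + beta*vdel and vy0 + beta*vdel with beta = (1/2, 1/2, 1). */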
/* Simple kernel:
* Given in[0 ... (nodeN-1)], copies the segment in[partX0 ... (partX0 + partNX -1)] to out[0 ... (partNX-1)]
* and linearly extrapolates addresses that fall outside [0, nodeN) from the end cells
* invoke with gridDim.x * blockDim.x >= partNX
*/
__global__ void cukern_FetchPartitionSubset1DExtrap(double *in, int nodeN, double *out, int partX0, int partNX)
{
// calculate output address
int addrOut = threadIdx.x + blockDim.x * blockIdx.x;
if(addrOut >= partNX) return;
// Affine map back to input address
int addrIn = addrOut + partX0;
if(addrIn < 0) {
double delta = in[1]-in[0];
out[addrOut] = in[0]+delta*addrIn;
} else {
if(addrIn >= nodeN) {
double delta = in[1]-in[0];
out[addrOut] = in[nodeN-1] + (addrIn-nodeN+1)*delta;
} else {
out[addrOut] = in[addrIn];
}
}
}
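/* A minimal host-side launch sketch for the kernel above (nThreads, nBlocks, devIn and
 * devOut are hypothetical names, not taken from this file):
 *   int nThreads = 256;
 *   int nBlocks  = (partNX + nThreads - 1) / nThreads; // so gridDim.x*blockDim.x >= partNX
 *   hipLaunchKernelGGL(( cukern_FetchPartitionSubset1DExtrap), dim3(nBlocks), dim3(nThreads), 0, 0, devIn, nodeN, devOut, partX0, partNX);
 */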
/* Converts the fluid slab array from conservative
* [rho, Etotal, px, py, pz]
* variables to
* [rho, Einternal, vx, vy, vz]
* primitive variables which may be more suited for some computations. */
__global__ void cukern_cvtToPrimitiveVars(double *fluid, long partNumel, long pitch)
{
unsigned int globAddr = threadIdx.x + blockDim.x*blockIdx.x;
if(globAddr >= partNumel) return;
double rhoinv, p[3], Etot;
fluid += globAddr;
for(; globAddr < partNumel; globAddr += blockDim.x*gridDim.x) {
rhoinv = 1.0/fluid[0];
Etot = fluid[pitch];
p[0] = fluid[2*pitch];
p[1] = fluid[3*pitch];
p[2] = fluid[4*pitch];
fluid[2*pitch] = p[0]*rhoinv;
fluid[3*pitch] = p[1]*rhoinv;
fluid[4*pitch] = p[2]*rhoinv;
Etot -= .5*(p[0]*p[0]+p[1]*p[1]+p[2]*p[2])*rhoinv;
fluid[pitch] = Etot;
fluid += blockDim.x*gridDim.x;
}
}
/* Converts the fluid slab array from primitive
* [rho, Einternal, vx, vy, vz]
* variables to conservative
* [rho, Etotal, px, py, pz]
* variables which are mandatory for conservative flux differencing */
__global__ void cukern_cvtToConservativeVars(double *fluid, long partNumel, long pitch)
{
unsigned int globAddr = threadIdx.x + blockDim.x*blockIdx.x;
if(globAddr >= partNumel) return;
double rho, v[3], Eint;
fluid += globAddr;
for(; globAddr < partNumel; globAddr += blockDim.x*gridDim.x) {
rho = fluid[0];
Eint = fluid[pitch];
v[0] = fluid[2*pitch];
v[1] = fluid[3*pitch];
v[2] = fluid[4*pitch];
fluid[2*pitch] = v[0]*rho;
fluid[3*pitch] = v[1]*rho;
fluid[4*pitch] = v[2]*rho;
Eint += .5*(v[0]*v[0]+v[1]*v[1]+v[2]*v[2])*rho;
fluid[pitch] = Eint;
fluid += blockDim.x*gridDim.x;
}
}
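/* Memory layout implied by the indexing in the two conversion kernels above: the five
 * fluid slabs are stored with a common element pitch, so for the cell at linear index i
 *   fluid[i + 0*pitch] = rho
 *   fluid[i + 1*pitch] = E_total (conservative) or E_internal (primitive)
 *   fluid[i + 2*pitch] = px or vx,  fluid[i + 3*pitch] = py or vy,  fluid[i + 4*pitch] = pz or vz
 * depending on which representation the slab array currently holds. */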
|
0a0eec5dfba6645b57031ff33dc328f03359bd82.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <limits>
#include <ATen/ATen.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/core/Array.h>
#include <ATen/hip/cub.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/SortUtils.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
namespace at { namespace native {
bool should_use_small_sort(const Tensor &self, int64_t dim) {
int64_t ndim = self.dim();
int64_t nsort = self.sizes()[dim];
int64_t threshold;
if (self.scalar_type() == kLong || self.scalar_type() == kDouble) {
threshold = 1024;
} else {
threshold = 2048;
}
return nsort <= threshold;
}
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim);
void fillSliceWithIndex(Tensor& t,int dim) {
if (t.numel()) {
auto sizes = DimVector(t.dim(), 1);
sizes[dim] = t.sizes()[dim];
auto range = at::arange(t.sizes()[dim], t.options());
auto rangeview = range.view(sizes);
t.copy_(rangeview);
}
}
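// Worked example (shapes chosen purely for illustration): for a 2x3 tensor t and dim = 1,
// fillSliceWithIndex broadcasts arange(3) into every slice, giving
//   t = [[0, 1, 2],
//        [0, 1, 2]]
// i.e. each slice starts from the identity permutation before the in-place k/v sort runs.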
// In alignment with default sort on a c++ map, this function
// will permute key and value tensors identically, and
// in such a way that the 'key' tensor is ordered numerically
void sortKeyValueInplace(const Tensor& key,
const Tensor& value,
int dim, bool dir) {
TORCH_CHECK(key.sizes() == value.sizes(),
"Key tensor must have same size as value tensor");
int dims = value.dim();
TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions");
// if key and value tensors have the same size, we do not need to check both
ptrdiff_t inElements = key.numel();
if (inElements == 0) {
return;
}
int64_t keySliceSize = key.size(dim);
ptrdiff_t keySlices = inElements / keySliceSize;
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
// FIXME: We'd have to find some other trick with Thrust to perform a
// vectorized (key, value) sort by slice segment
TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 2048, "sortKeyValueInplace only works for sizes <= 2048 at present");
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid), "Too many slices to sort");
#define HANDLE_CASE(TYPE, A, SIZE) \
do { \
int blockSize = SIZE / 2; \
if (blockSize < 1) { \
blockSize = 1; \
} \
\
dim3 block(blockSize); \
\
if (dir) { \
hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
GTComp<scalar_t, true>, TYPE, SIZE>) \
, dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
GTComp<scalar_t, true>()); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} else { \
hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
LTComp<scalar_t, true>, TYPE, SIZE>) \
, dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
LTComp<scalar_t, true>()); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
} while (0)
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
case 512: \
case 256: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 128: \
case 64: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 32: \
case 16: \
case 8: \
case 4: \
case 2: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 1: \
/* Nothing to do, data already sorted */ \
break; \
default: \
TORCH_INTERNAL_ASSERT(false); \
} \
}
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] {
if (at::cuda::detail::canUse32BitIndexMath(key)) {
at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key);
at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
if (keyInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned int, -2);
} else {
switch (keyInfo.dims) {
case 2:
HANDLE_SORT_CASE(unsigned int, 2);
break;
default:
HANDLE_SORT_CASE(unsigned int, -1);
break;
}
}
} else {
at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key);
at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
// int64_t case is rare, just instantiate the generic version
HANDLE_SORT_CASE(uint64_t, -1);
}
});
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
}
namespace {
struct offset_t {
int stride;
int begin;
__device__ int operator[](int i) {
return stride * (begin + i);
}
};
}
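// Illustrative note: offset_t is a lightweight random-access generator of segment
// boundaries for the cub segmented sort below. With stride = nsort,
// offset_t{(int)nsort, 0}[i] == i*nsort (begin of segment i) and
// offset_t{(int)nsort, 1}[i] == (i+1)*nsort (end of segment i), so nsegments contiguous
// slices of length nsort are sorted independently per call.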
// We perform a segmented sort in cub with inputs that have
// more than 1024/2048 elements along the selected dimension.
// Otherwise, we do an inplace bitonic sort (see sortKeyValueInplace).
std::tuple<Tensor &,Tensor &> sort_out_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
// this algorithm is always stable
TORCH_INTERNAL_ASSERT(stable.has_value(), "sort_out(): c10::optional<bool> for stable has to have value.");
TensorArg self_arg{self, "self", 1}, values_arg{values, "values", 2}, indices_arg{indices, "indices", 3};
checkAllSameGPU("small_sort", {self_arg, values_arg, indices_arg});
bool is_non_overlapping_and_dense = self.is_non_overlapping_and_dense();
int64_t numel = self.numel();
int64_t ndim = self.dim();
dim = maybe_wrap_dim(dim, ndim);
int64_t nsort = self.sizes()[dim];
TORCH_CHECK(nsort <= std::numeric_limits<int>::max(),
"The dimension being sorted can not have more than INT_MAX elements.");
const auto self_dtype = self.dtype();
// FIXME: remove this check once cub sort supports bool
TORCH_CHECK(self_dtype != ScalarType::Bool,
"Sort currently does not support bool dtype on CUDA.");
TORCH_CHECK(self_dtype != ScalarType::ComplexFloat && self_dtype != ScalarType::ComplexDouble,
"Sort currently does not support complex dtypes on CUDA.");
if (ndim == 0) {
if (!values.defined()) {
values = self.clone();
} else {
values.resize_as_(self);
values.copy_(self);
}
if (!indices.defined()) {
indices = at::zeros({}, self.options().dtype(kLong));
} else {
indices.resize_as_(self);
indices.zero_();
}
return std::forward_as_tuple(values, indices);
}
// use inplace algorithm for smaller input sizes without stable=True
if (should_use_small_sort(self, dim) && !stable.value()) {
// from thc: sorted->values, indices->indices, input->self
if (!values.defined()) {
values = at::empty_like(self);
}
if (!indices.defined()) {
indices = at::empty_like(self, self.options().dtype(kLong));
}
// Make sure sufficient output space is allocated
auto self_size = self.sizes();
at::native::resize_output(values, self_size);
at::native::resize_output(indices, self_size);
fillSliceWithIndex(indices, dim);
// We sort k/v pairs in-place; copy unsorted input to output
values.copy_(self);
// Sort using our in-place k/v kernel that supports arbitrary
// layout
sortKeyValueInplace(values, indices, dim, descending);
return std::forward_as_tuple(values, indices);
}
Tensor self_;
if (is_non_overlapping_and_dense && self.stride(dim) == 1) {
self_ = self;
} else {
auto new_strides_unsort = infer_dense_strides_dim_last(self, dim);
self_ = at::empty_strided(self.sizes(), new_strides_unsort, self.options());
self_.copy_(self);
}
Tensor values_tmp, indices_tmp;
void *values_ptr_;
int64_t *indices_ptr;
if (!values.defined()) {
if (is_non_overlapping_and_dense) {
values = at::empty_strided(self.sizes(), self.strides(), self.options());
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
values = at::empty_strided(self.sizes(), strides, self.options());
}
} else {
TORCH_CHECK(self_.scalar_type() == values.scalar_type(),
"Unexpected dtype for values, expect ", self_.scalar_type(), ", got ", values.scalar_type());
values.resize_as_(self);
}
if (values.strides() != self_.strides()) {
values_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options());
values_ptr_ = values_tmp.data_ptr();
} else {
values_ptr_ = values.data_ptr();
}
if (!indices.defined()) {
if (is_non_overlapping_and_dense) {
indices = at::empty_strided(self.sizes(), self.strides(), self.options().dtype(kLong));
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
indices = at::empty_strided(self.sizes(), strides, self.options().dtype(kLong));
}
} else {
TORCH_CHECK(kLong == indices.scalar_type(),
"Unexpected dtype for values, expect torch.long, got ", indices.scalar_type());
indices.resize_as_(self);
}
if (indices.strides() != self_.strides()) {
indices_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options().dtype(kLong));
indices_ptr = indices_tmp.data_ptr<int64_t>();
} else {
indices_ptr = indices.data_ptr<int64_t>();
}
if (numel == 0) {
return std::forward_as_tuple(values, indices);
}
int64_t numel_or_intmax = ::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max()));
int64_t nbatch = (numel_or_intmax / nsort) * nsort;
AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self_.scalar_type(), "sort", [&]{
const scalar_t *self_ptr = self_.data_ptr<scalar_t>();
auto values_ptr = reinterpret_cast<scalar_t *>(values_ptr_);
int64_t remaining = numel;
while (remaining > 0) {
int64_t n = ::min(remaining, nbatch);
int64_t nsegments = n / nsort;
auto reverse_indices = at::arange(nsort, indices.options()).view({1, nsort}).expand({nsegments, nsort}).contiguous();
at::cuda::cub::segmented_sort_pairs(self_ptr, values_ptr,
reverse_indices.data_ptr<int64_t>(), indices_ptr, n, nsegments,
offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending);
remaining -= n;
self_ptr += n;
values_ptr += n;
indices_ptr += n;
}
});
if (values_tmp.defined()) {
values.copy_(values_tmp);
}
if (indices_tmp.defined()) {
indices.copy_(indices_tmp);
}
return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor &,Tensor &> sort_out_cuda(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
return sort_out_stable_cuda(self, /*stable=*/false, dim, descending, values, indices);
}
std::tuple<Tensor,Tensor> sort_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
Tensor values, indices;
return sort_out_stable_cuda(self, stable, dim, descending, values, indices);
}
std::tuple<Tensor,Tensor> sort_cuda(const Tensor & self, int64_t dim, bool descending) { int64_t threshold;
return sort_stable_cuda(self, /*stable=*/false, dim, descending);
}
}} // namespace at::native
|
0a0eec5dfba6645b57031ff33dc328f03359bd82.cu
|
#include <limits>
#include <ATen/ATen.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/core/Array.h>
#include <ATen/cuda/cub.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/SortUtils.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
namespace at { namespace native {
bool should_use_small_sort(const Tensor &self, int64_t dim) {
int64_t ndim = self.dim();
int64_t nsort = self.sizes()[dim];
int64_t threshold;
if (self.scalar_type() == kLong || self.scalar_type() == kDouble) {
threshold = 1024;
} else {
threshold = 2048;
}
return nsort <= threshold;
}
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim);
void fillSliceWithIndex(Tensor& t,int dim) {
if (t.numel()) {
auto sizes = DimVector(t.dim(), 1);
sizes[dim] = t.sizes()[dim];
auto range = at::arange(t.sizes()[dim], t.options());
auto rangeview = range.view(sizes);
t.copy_(rangeview);
}
}
// In alignment with default sort on a c++ map, this function
// will permute key and value tensors identically, and
// in such a way that the 'key' tensor is ordered numerically
void sortKeyValueInplace(const Tensor& key,
const Tensor& value,
int dim, bool dir) {
TORCH_CHECK(key.sizes() == value.sizes(),
"Key tensor must have same size as value tensor");
int dims = value.dim();
TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions");
// if key and value tensors have the same size, we do not need to check both
ptrdiff_t inElements = key.numel();
if (inElements == 0) {
return;
}
int64_t keySliceSize = key.size(dim);
ptrdiff_t keySlices = inElements / keySliceSize;
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
// FIXME: We'd have to find some other trick with Thrust to perform a
// vectorized (key, value) sort by slice segment
TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 2048, "sortKeyValueInplace only works for sizes <= 2048 at present");
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid), "Too many slices to sort");
#define HANDLE_CASE(TYPE, A, SIZE) \
do { \
int blockSize = SIZE / 2; \
if (blockSize < 1) { \
blockSize = 1; \
} \
\
dim3 block(blockSize); \
\
if (dir) { \
bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
GTComp<scalar_t, true>, TYPE, SIZE> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
GTComp<scalar_t, true>()); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} else { \
bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
LTComp<scalar_t, true>, TYPE, SIZE> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
LTComp<scalar_t, true>()); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
} while (0)
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
case 512: \
case 256: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 128: \
case 64: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 32: \
case 16: \
case 8: \
case 4: \
case 2: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 1: \
/* Nothing to do, data already sorted */ \
break; \
default: \
TORCH_INTERNAL_ASSERT(false); \
} \
}
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] {
if (at::cuda::detail::canUse32BitIndexMath(key)) {
at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key);
at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
if (keyInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned int, -2);
} else {
switch (keyInfo.dims) {
case 2:
HANDLE_SORT_CASE(unsigned int, 2);
break;
default:
HANDLE_SORT_CASE(unsigned int, -1);
break;
}
}
} else {
at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key);
at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
// int64_t case is rare, just instantiate the generic version
HANDLE_SORT_CASE(uint64_t, -1);
}
});
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
}
namespace {
struct offset_t {
int stride;
int begin;
__device__ int operator[](int i) {
return stride * (begin + i);
}
};
}
// We perform a segmented sort in cub with inputs that have
// more than 1024/2048 elements along the selected dimension.
// Otherwise, we do an inplace bitonic sort (see sortKeyValueInplace).
std::tuple<Tensor &,Tensor &> sort_out_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
// this algorithm is always stable
TORCH_INTERNAL_ASSERT(stable.has_value(), "sort_out(): c10::optional<bool> for stable has to have value.");
TensorArg self_arg{self, "self", 1}, values_arg{values, "values", 2}, indices_arg{indices, "indices", 3};
checkAllSameGPU("small_sort", {self_arg, values_arg, indices_arg});
bool is_non_overlapping_and_dense = self.is_non_overlapping_and_dense();
int64_t numel = self.numel();
int64_t ndim = self.dim();
dim = maybe_wrap_dim(dim, ndim);
int64_t nsort = self.sizes()[dim];
TORCH_CHECK(nsort <= std::numeric_limits<int>::max(),
"The dimension being sorted can not have more than INT_MAX elements.");
const auto self_dtype = self.dtype();
// FIXME: remove this check once cub sort supports bool
TORCH_CHECK(self_dtype != ScalarType::Bool,
"Sort currently does not support bool dtype on CUDA.");
TORCH_CHECK(self_dtype != ScalarType::ComplexFloat && self_dtype != ScalarType::ComplexDouble,
"Sort currently does not support complex dtypes on CUDA.");
if (ndim == 0) {
if (!values.defined()) {
values = self.clone();
} else {
values.resize_as_(self);
values.copy_(self);
}
if (!indices.defined()) {
indices = at::zeros({}, self.options().dtype(kLong));
} else {
indices.resize_as_(self);
indices.zero_();
}
return std::forward_as_tuple(values, indices);
}
// use inplace algorithm for smaller input sizes without stable=True
if (should_use_small_sort(self, dim) && !stable.value()) {
// from thc: sorted->values, indices->indices, input->self
if (!values.defined()) {
values = at::empty_like(self);
}
if (!indices.defined()) {
indices = at::empty_like(self, self.options().dtype(kLong));
}
// Make sure sufficient output space is allocated
auto self_size = self.sizes();
at::native::resize_output(values, self_size);
at::native::resize_output(indices, self_size);
fillSliceWithIndex(indices, dim);
// We sort k/v pairs in-place; copy unsorted input to output
values.copy_(self);
// Sort using our in-place k/v kernel that supports arbitrary
// layout
sortKeyValueInplace(values, indices, dim, descending);
return std::forward_as_tuple(values, indices);
}
Tensor self_;
if (is_non_overlapping_and_dense && self.stride(dim) == 1) {
self_ = self;
} else {
auto new_strides_unsort = infer_dense_strides_dim_last(self, dim);
self_ = at::empty_strided(self.sizes(), new_strides_unsort, self.options());
self_.copy_(self);
}
Tensor values_tmp, indices_tmp;
void *values_ptr_;
int64_t *indices_ptr;
if (!values.defined()) {
if (is_non_overlapping_and_dense) {
values = at::empty_strided(self.sizes(), self.strides(), self.options());
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
values = at::empty_strided(self.sizes(), strides, self.options());
}
} else {
TORCH_CHECK(self_.scalar_type() == values.scalar_type(),
"Unexpected dtype for values, expect ", self_.scalar_type(), ", got ", values.scalar_type());
values.resize_as_(self);
}
if (values.strides() != self_.strides()) {
values_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options());
values_ptr_ = values_tmp.data_ptr();
} else {
values_ptr_ = values.data_ptr();
}
if (!indices.defined()) {
if (is_non_overlapping_and_dense) {
indices = at::empty_strided(self.sizes(), self.strides(), self.options().dtype(kLong));
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
indices = at::empty_strided(self.sizes(), strides, self.options().dtype(kLong));
}
} else {
TORCH_CHECK(kLong == indices.scalar_type(),
"Unexpected dtype for values, expect torch.long, got ", indices.scalar_type());
indices.resize_as_(self);
}
if (indices.strides() != self_.strides()) {
indices_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options().dtype(kLong));
indices_ptr = indices_tmp.data_ptr<int64_t>();
} else {
indices_ptr = indices.data_ptr<int64_t>();
}
if (numel == 0) {
return std::forward_as_tuple(values, indices);
}
int64_t numel_or_intmax = std::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max()));
int64_t nbatch = (numel_or_intmax / nsort) * nsort;
AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self_.scalar_type(), "sort", [&]{
const scalar_t *self_ptr = self_.data_ptr<scalar_t>();
auto values_ptr = reinterpret_cast<scalar_t *>(values_ptr_);
int64_t remaining = numel;
while (remaining > 0) {
int64_t n = std::min(remaining, nbatch);
int64_t nsegments = n / nsort;
auto reverse_indices = at::arange(nsort, indices.options()).view({1, nsort}).expand({nsegments, nsort}).contiguous();
at::cuda::cub::segmented_sort_pairs(self_ptr, values_ptr,
reverse_indices.data_ptr<int64_t>(), indices_ptr, n, nsegments,
offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending);
remaining -= n;
self_ptr += n;
values_ptr += n;
indices_ptr += n;
}
});
if (values_tmp.defined()) {
values.copy_(values_tmp);
}
if (indices_tmp.defined()) {
indices.copy_(indices_tmp);
}
return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor &,Tensor &> sort_out_cuda(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
return sort_out_stable_cuda(self, /*stable=*/false, dim, descending, values, indices);
}
std::tuple<Tensor,Tensor> sort_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
Tensor values, indices;
return sort_out_stable_cuda(self, stable, dim, descending, values, indices);
}
std::tuple<Tensor,Tensor> sort_cuda(const Tensor & self, int64_t dim, bool descending) { int64_t threshold;
return sort_stable_cuda(self, /*stable=*/false, dim, descending);
}
}} // namespace at::native
|
88daf02bdf46068e42bc5681ef07488b8d77aca4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// 3D convolution by CUDA
__global__ void cu_conv(const float *A,const float *K,const float *B, int kw, int kh, int kn, int cw_rem, int ch_rem, float *C){
// A : input data, K : Kernel, B : bias
int cx = threadIdx.x + blockIdx.x*blockDim.x;
int cy = threadIdx.y + blockIdx.y*blockDim.y;
int cz = blockIdx.z/int(kn*kh*kw);
int n = (blockIdx.z%(kn*kh*kw)) / (kh*kw);
int j = ((blockIdx.z%(kn*kh*kw)) % (kh*kw)) / kw;
int i = ((blockIdx.z%(kn*kh*kw)) % (kh*kw)) % kw;
int cw = blockDim.x*gridDim.x + cw_rem;
int ch = blockDim.y*gridDim.y + ch_rem;
int aw = cw + (kw-1);
int ah = ch + (kh-1);
int cidx = cx + cy*cw + cz*(cw*ch);
int aidx = (cx+i) + (cy+j)*aw + (cz)*(aw*ah);
int kidx = i + j*kw + n*(kw*kh);
int bidx = n;
if (cx < cw && cy < ch){
C[cidx] = A[aidx]*K[kidx] + B[bidx]/(kw*kh);
}
}
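/* Illustrative note on the launch geometry cu_conv assumes: the true output width and
 * height are reconstructed inside the kernel as
 *   cw = blockDim.x*gridDim.x + cw_rem,   ch = blockDim.y*gridDim.y + ch_rem,
 * so cw_rem/ch_rem are whatever remainders make those identities hold for the chosen
 * grid, and gridDim.z must equal (number of z-slices) * kn * kh * kw because blockIdx.z
 * is decomposed into (z-slice cz, kernel n, tap row j, tap column i). Each thread writes
 * a single kernel tap's contribution to C[cidx]; accumulation over taps is left to the
 * caller. */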
|
88daf02bdf46068e42bc5681ef07488b8d77aca4.cu
|
// 3D convolution by CUDA
__global__ void cu_conv(const float *A,const float *K,const float *B, int kw, int kh, int kn, int cw_rem, int ch_rem, float *C){
// A : input data, K : Kernel, B : bias
int cx = threadIdx.x + blockIdx.x*blockDim.x;
int cy = threadIdx.y + blockIdx.y*blockDim.y;
int cz = blockIdx.z/int(kn*kh*kw);
int n = (blockIdx.z%(kn*kh*kw)) / (kh*kw);
int j = ((blockIdx.z%(kn*kh*kw)) % (kh*kw)) / kw;
int i = ((blockIdx.z%(kn*kh*kw)) % (kh*kw)) % kw;
int cw = blockDim.x*gridDim.x + cw_rem;
int ch = blockDim.y*gridDim.y + ch_rem;
int aw = cw + (kw-1);
int ah = ch + (kh-1);
int cidx = cx + cy*cw + cz*(cw*ch);
int aidx = (cx+i) + (cy+j)*aw + (cz)*(aw*ah);
int kidx = i + j*kw + n*(kw*kh);
int bidx = n;
if (cx < cw && cy < ch){
C[cidx] = A[aidx]*K[kidx] + B[bidx]/(kw*kh);
}
}
|
9fe61093a7f43a505e3b3537ecdec1735020b80e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void kernel(int *a, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < N; i += stride)
{
a[i] = a[i] * 2;
}
}
void init(int *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = 2;
}
}
int main(int argc, char **argv)
{
int deviceId;
hipGetDevice(&deviceId);
int E = 20;
if (argc > 1) E = atoi(argv[1]);
int N = 2<<E;
printf("N is 2<<%d: %d\n", E, 2<<E);
int *a;
size_t size = N * sizeof(int);
hipMallocManaged(&a, size);
init(a, N);
hipMemPrefetchAsync(a, size, deviceId);
size_t threadsPerBlock = 256;
size_t numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( kernel), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, a, N);
hipDeviceSynchronize();
printf("Done\n");
}
|
9fe61093a7f43a505e3b3537ecdec1735020b80e.cu
|
#include <stdio.h>
__global__
void kernel(int *a, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < N; i += stride)
{
a[i] = a[i] * 2;
}
}
void init(int *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = 2;
}
}
int main(int argc, char **argv)
{
int deviceId;
cudaGetDevice(&deviceId);
int E = 20;
if (argc > 1) E = atoi(argv[1]);
int N = 2<<E;
printf("N is 2<<%d: %d\n", E, 2<<E);
int *a;
size_t size = N * sizeof(int);
cudaMallocManaged(&a, size);
init(a, N);
cudaMemPrefetchAsync(a, size, deviceId);
size_t threadsPerBlock = 256;
size_t numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
kernel<<<numberOfBlocks, threadsPerBlock>>>(a, N);
cudaDeviceSynchronize();
printf("Done\n");
}
|
5835ecc61f5e0d44c941ad58cf994b6ec2ff4392.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6) {
if (comp > (var_2 / +0.0f)) {
for (int i=0; i < var_1; ++i) {
comp += (-1.7167E-41f / (+0.0f - (var_3 - -1.5503E-35f)));
if (comp > (+1.2504E36f * coshf(+1.2904E-42f * var_4))) {
float tmp_1 = tanhf((-1.5664E-8f / var_5 / (-1.3954E35f / -1.1985E-35f)));
float tmp_2 = -1.7700E35f;
float tmp_3 = -1.6487E-13f;
comp = tmp_3 - tmp_2 + tmp_1 + (-1.5199E17f * (var_6 / +1.2018E-35f));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7);
hipDeviceSynchronize();
return 0;
}
|
5835ecc61f5e0d44c941ad58cf994b6ec2ff4392.cu
|
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6) {
if (comp > (var_2 / +0.0f)) {
for (int i=0; i < var_1; ++i) {
comp += (-1.7167E-41f / (+0.0f - (var_3 - -1.5503E-35f)));
if (comp > (+1.2504E36f * coshf(+1.2904E-42f * var_4))) {
float tmp_1 = tanhf((-1.5664E-8f / var_5 / (-1.3954E35f / -1.1985E-35f)));
float tmp_2 = -1.7700E35f;
float tmp_3 = -1.6487E-13f;
comp = tmp_3 - tmp_2 + tmp_1 + (-1.5199E17f * (var_6 / +1.2018E-35f));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7);
cudaDeviceSynchronize();
return 0;
}
|
bbc30bff9fb4768db047015d7cf7d865f48389a8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <wb.h>
//@@ The purpose of this code is to become familiar with the submission
//@@ process. Do not worry if you do not understand all the details of
//@@ the code.
int main(int argc, char **argv) {
int deviceCount;
wbArg_read(argc, argv);
hipGetDeviceCount(&deviceCount);
wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer
for (int dev = 0; dev < deviceCount; dev++) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
wbLog(TRACE, "No CUDA GPU has been detected");
return -1;
} else if (deviceCount == 1) {
//@@ WbLog is a provided logging API (similar to Log4J).
//@@ The logging function wbLog takes a level which is either
//@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a
//@@ message to be printed.
wbLog(TRACE, "There is 1 device supporting CUDA");
} else {
wbLog(TRACE, "There are ", deviceCount,
" devices supporting CUDA");
}
}
wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".",
deviceProp.minor);
wbLog(TRACE, " Maximum global memory size: ",
deviceProp.totalGlobalMem);
wbLog(TRACE, " Maximum constant memory size: ",
deviceProp.totalConstMem);
wbLog(TRACE, " Maximum shared memory size per block: ",
deviceProp.sharedMemPerBlock);
wbLog(TRACE, " Maximum block dimensions: ",
deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1],
" x ", deviceProp.maxThreadsDim[2]);
wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0],
" x ", deviceProp.maxGridSize[1], " x ",
deviceProp.maxGridSize[2]);
wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
//added
wbLog(TRACE, "Max Threads per Block: ", deviceProp.maxThreadsPerBlock );
wbLog(TRACE, "Device overlap able ", deviceProp.deviceOverlap);
}
wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer
return 0;
}
|
bbc30bff9fb4768db047015d7cf7d865f48389a8.cu
|
#include <wb.h>
//@@ The purpose of this code is to become familiar with the submission
//@@ process. Do not worry if you do not understand all the details of
//@@ the code.
int main(int argc, char **argv) {
int deviceCount;
wbArg_read(argc, argv);
cudaGetDeviceCount(&deviceCount);
wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer
for (int dev = 0; dev < deviceCount; dev++) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
wbLog(TRACE, "No CUDA GPU has been detected");
return -1;
} else if (deviceCount == 1) {
//@@ WbLog is a provided logging API (similar to Log4J).
//@@ The logging function wbLog takes a level which is either
//@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a
//@@ message to be printed.
wbLog(TRACE, "There is 1 device supporting CUDA");
} else {
wbLog(TRACE, "There are ", deviceCount,
" devices supporting CUDA");
}
}
wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".",
deviceProp.minor);
wbLog(TRACE, " Maximum global memory size: ",
deviceProp.totalGlobalMem);
wbLog(TRACE, " Maximum constant memory size: ",
deviceProp.totalConstMem);
wbLog(TRACE, " Maximum shared memory size per block: ",
deviceProp.sharedMemPerBlock);
wbLog(TRACE, " Maximum block dimensions: ",
deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1],
" x ", deviceProp.maxThreadsDim[2]);
wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0],
" x ", deviceProp.maxGridSize[1], " x ",
deviceProp.maxGridSize[2]);
wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
//added
wbLog(TRACE, "Max Threads per Block: ", deviceProp.maxThreadsPerBlock );
wbLog(TRACE, "Device overlap able ", deviceProp.deviceOverlap);
}
wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer
return 0;
}
|
3b4f59055e9c1587555f5373a5bf06d2d0f791a1.hip
|
// !!! This is a file automatically generated by hipify!!!
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_unroll_width.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_unroll_width<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
|
3b4f59055e9c1587555f5373a5bf06d2d0f791a1.cu
|
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_unroll_width.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_unroll_width<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
3c63d985faeb3a863d9ee7a5dafc0f2a97884f66.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void generateImg(unsigned char * data, unsigned char * img, unsigned char * tabDepth, int4 * _tabParents, int i, int tailleTab) {
int thx = blockIdx.x * blockDim.x + threadIdx.x;
int thy = blockIdx.y * blockDim.y + threadIdx.y;
int ThId = thy * tailleTab + thx;
int nbPar = 0;
if(data[ThId] == 0 && tabDepth[ThId] == i && i != 1) {
if(_tabParents[ThId].x != -1) nbPar ++;
if(_tabParents[ThId].y != -1) nbPar ++;
if(_tabParents[ThId].z != -1) nbPar ++;
if(_tabParents[ThId].w != -1) nbPar ++;
data[ThId] = (data[_tabParents[ThId].x] + data[_tabParents[ThId].y] + data[_tabParents[ThId].z] + data[_tabParents[ThId].w]) / nbPar;
img[ThId] = data[ThId];
}
}
|
3c63d985faeb3a863d9ee7a5dafc0f2a97884f66.cu
|
#include "includes.h"
__global__ void generateImg(unsigned char * data, unsigned char * img, unsigned char * tabDepth, int4 * _tabParents, int i, int tailleTab) {
int thx = blockIdx.x * blockDim.x + threadIdx.x;
int thy = blockIdx.y * blockDim.y + threadIdx.y;
int ThId = thy * tailleTab + thx;
int nbPar = 0;
if(data[ThId] == 0 && tabDepth[ThId] == i && i != 1) {
if(_tabParents[ThId].x != -1) nbPar ++;
if(_tabParents[ThId].y != -1) nbPar ++;
if(_tabParents[ThId].z != -1) nbPar ++;
if(_tabParents[ThId].w != -1) nbPar ++;
data[ThId] = (data[_tabParents[ThId].x] + data[_tabParents[ThId].y] + data[_tabParents[ThId].z] + data[_tabParents[ThId].w]) / nbPar;
img[ThId] = data[ThId];
}
}
|
0f312939ac9aafa4d62dddde6feb3568f493dcf4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <comm_quda.h>
#include <pgauge_monte.h>
#include <atomic.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
namespace quda {
#ifdef GPU_GAUGE_ALG
template <typename Gauge>
struct KernelArg : public ReduceArg<double2> {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
KernelArg(const Gauge &dataOr, const cudaGaugeField &data)
: ReduceArg<double2>(), dataOr(dataOr) {
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir){
border[dir] = data.R()[dir];
X[dir] = data.X()[dir] - border[dir]*2;
}
#else
for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir];
#endif
threads = X[0]*X[1]*X[2]*X[3]/2;
}
double2 getValue(){return result_h[0];}
};
template<int blockSize, typename Float, typename Gauge, int NCOLORS, int functiontype>
__global__ void compute_Value(KernelArg<Gauge> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y;
complex<double> val(0.0, 0.0);
while (idx < arg.threads) {
int X[4];
#pragma unroll
for(int dr=0; dr<4; ++dr) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
#pragma unroll
for(int dr=0; dr<4; ++dr) {
x[dr] += arg.border[dr];
X[dr] += 2*arg.border[dr];
}
idx = linkIndex(x,X);
#endif
#pragma unroll
for (int mu = 0; mu < 4; mu++) {
Matrix<complex<Float>,NCOLORS> U;
arg.dataOr.load((Float*)(U.data), idx, mu, parity);
if(functiontype == 0) val += getDeterminant(U);
if(functiontype == 1) val += getTrace(U);
}
idx += blockDim.x*gridDim.x;
}
double2 sum = make_double2(val.real(), val.imag());
reduce2d<blockSize,2>(arg, sum);
}
template<typename Float, typename Gauge, int NCOLORS, int functiontype>
class CalcFunc : TunableLocalParity {
KernelArg<Gauge> arg;
TuneParam tp;
mutable char aux_string[128]; // used as a label in the autotuner
private:
bool tuneGridDim() const { return true; }
public:
CalcFunc(KernelArg<Gauge> &arg) : arg(arg) {}
~CalcFunc () { }
void apply(const hipStream_t &stream){
tp = tuneLaunch(*this, getTuning(), getVerbosity());
arg.result_h[0] = make_double2(0.0, 0.0);
LAUNCH_KERNEL_LOCAL_PARITY(compute_Value, (*this), tp, stream, arg, Float, Gauge, NCOLORS, functiontype);
qudaDeviceSynchronize();
comm_allreduce_array((double*)arg.result_h, 2);
arg.result_h[0].x /= (double)(4*2*arg.threads*comm_size());
arg.result_h[0].y /= (double)(4*2*arg.threads*comm_size());
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
long long flops() const {
if(NCOLORS==3 && functiontype == 0) return 264LL*2*arg.threads+2LL*tp.block.x;
else if(NCOLORS==3 && functiontype == 1) return 24LL*2*arg.threads+2LL*tp.block.x;
else return 0;
}// Only correct if there is no link reconstruction
long long bytes() const { return 4LL*NCOLORS * NCOLORS * sizeof(Float)*2*2*arg.threads + tp.block.x * sizeof(double2); }
};
template<typename Float, int NCOLORS, int functiontype, typename Gauge>
double2 computeValue( Gauge dataOr, cudaGaugeField& data) {
TimeProfile profileGenericFunc("GenericFunc", false);
if (getVerbosity() >= QUDA_SUMMARIZE) profileGenericFunc.TPSTART(QUDA_PROFILE_COMPUTE);
KernelArg<Gauge> arg(dataOr, data);
CalcFunc<Float, Gauge, NCOLORS, functiontype> func(arg);
func.apply(0);
if(getVerbosity() >= QUDA_SUMMARIZE && functiontype == 0) printfQuda("Determinant: %.16e, %.16e\n", arg.getValue().x, arg.getValue().y);
if(getVerbosity() >= QUDA_SUMMARIZE && functiontype == 1) printfQuda("Trace: %.16e, %.16e\n", arg.getValue().x, arg.getValue().y);
checkCudaError();
qudaDeviceSynchronize();
if (getVerbosity() >= QUDA_SUMMARIZE){
profileGenericFunc.TPSTOP(QUDA_PROFILE_COMPUTE);
double secs = profileGenericFunc.Last(QUDA_PROFILE_COMPUTE);
double gflops = (func.flops()*1e-9)/(secs);
double gbytes = func.bytes()/(secs*1e9);
if(functiontype == 0){
#ifdef MULTI_GPU
printfQuda("Determinant: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops*comm_size(), gbytes*comm_size());
#else
printfQuda("Determinant: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
#endif
}
if(functiontype == 1){
#ifdef MULTI_GPU
printfQuda("Trace: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops*comm_size(), gbytes*comm_size());
#else
printfQuda("Trace: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
#endif
}
}
return arg.getValue();
}
template<typename Float, int functiontype>
double2 computeValue(cudaGaugeField& data) {
double2 rtn = make_double2(0.0,0.0);
// Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12
if(data.isNative()) {
if(data.Reconstruct() == QUDA_RECONSTRUCT_NO) {
//printfQuda("QUDA_RECONSTRUCT_NO\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
rtn = computeValue<Float, 3, functiontype>(Gauge(data), data);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_12){
//printfQuda("QUDA_RECONSTRUCT_12\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
rtn = computeValue<Float, 3, functiontype>(Gauge(data), data);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_8){
//printfQuda("QUDA_RECONSTRUCT_8\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
rtn = computeValue<Float, 3, functiontype>(Gauge(data), data);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
return rtn;
}
#endif // GPU_GAUGE_ALG
/** @brief Calculate the Determinant
*
* @param[in] data Gauge field
* @returns double2 complex Determinant value
*/
double2 getLinkDeterminant( cudaGaugeField& data) {
double2 det = make_double2(0.0,0.0);
#ifdef GPU_GAUGE_ALG
if (data.Precision() == QUDA_SINGLE_PRECISION) {
det = computeValue<float, 0> (data);
} else if(data.Precision() == QUDA_DOUBLE_PRECISION) {
det = computeValue<double, 0>(data);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Pure gauge code has not been built");
#endif // GPU_GAUGE_ALG
return det;
}
/** @brief Calculate the Trace
*
* @param[in] data Gauge field
* @returns double2 complex trace value
*/
double2 getLinkTrace( cudaGaugeField& data) {
double2 det = make_double2(0.0,0.0);
#ifdef GPU_GAUGE_ALG
if (data.Precision() == QUDA_SINGLE_PRECISION) {
det = computeValue<float, 1> (data);
} else if(data.Precision() == QUDA_DOUBLE_PRECISION) {
det = computeValue<double, 1>(data);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Pure gauge code has not been built");
#endif // GPU_GAUGE_ALG
return det;
}
} // namespace quda
|
0f312939ac9aafa4d62dddde6feb3568f493dcf4.cu
|
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <comm_quda.h>
#include <pgauge_monte.h>
#include <atomic.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
namespace quda {
#ifdef GPU_GAUGE_ALG
template <typename Gauge>
struct KernelArg : public ReduceArg<double2> {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
Gauge dataOr;
KernelArg(const Gauge &dataOr, const cudaGaugeField &data)
: ReduceArg<double2>(), dataOr(dataOr) {
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir){
border[dir] = data.R()[dir];
X[dir] = data.X()[dir] - border[dir]*2;
}
#else
for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir];
#endif
threads = X[0]*X[1]*X[2]*X[3]/2;
}
double2 getValue(){return result_h[0];}
};
template<int blockSize, typename Float, typename Gauge, int NCOLORS, int functiontype>
__global__ void compute_Value(KernelArg<Gauge> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y;
complex<double> val(0.0, 0.0);
while (idx < arg.threads) {
int X[4];
#pragma unroll
for(int dr=0; dr<4; ++dr) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
#pragma unroll
for(int dr=0; dr<4; ++dr) {
x[dr] += arg.border[dr];
X[dr] += 2*arg.border[dr];
}
idx = linkIndex(x,X);
#endif
#pragma unroll
for (int mu = 0; mu < 4; mu++) {
Matrix<complex<Float>,NCOLORS> U;
arg.dataOr.load((Float*)(U.data), idx, mu, parity);
if(functiontype == 0) val += getDeterminant(U);
if(functiontype == 1) val += getTrace(U);
}
idx += blockDim.x*gridDim.x;
}
double2 sum = make_double2(val.real(), val.imag());
reduce2d<blockSize,2>(arg, sum);
}
template<typename Float, typename Gauge, int NCOLORS, int functiontype>
class CalcFunc : TunableLocalParity {
KernelArg<Gauge> arg;
TuneParam tp;
mutable char aux_string[128]; // used as a label in the autotuner
private:
bool tuneGridDim() const { return true; }
public:
CalcFunc(KernelArg<Gauge> &arg) : arg(arg) {}
~CalcFunc () { }
void apply(const cudaStream_t &stream){
tp = tuneLaunch(*this, getTuning(), getVerbosity());
arg.result_h[0] = make_double2(0.0, 0.0);
LAUNCH_KERNEL_LOCAL_PARITY(compute_Value, (*this), tp, stream, arg, Float, Gauge, NCOLORS, functiontype);
qudaDeviceSynchronize();
comm_allreduce_array((double*)arg.result_h, 2);
arg.result_h[0].x /= (double)(4*2*arg.threads*comm_size());
arg.result_h[0].y /= (double)(4*2*arg.threads*comm_size());
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
long long flops() const {
if(NCOLORS==3 && functiontype == 0) return 264LL*2*arg.threads+2LL*tp.block.x;
else if(NCOLORS==3 && functiontype == 1) return 24LL*2*arg.threads+2LL*tp.block.x;
else return 0;
}// Only correct if there is no link reconstruction
long long bytes() const { return 4LL*NCOLORS * NCOLORS * sizeof(Float)*2*2*arg.threads + tp.block.x * sizeof(double2); }
};
template<typename Float, int NCOLORS, int functiontype, typename Gauge>
double2 computeValue( Gauge dataOr, cudaGaugeField& data) {
TimeProfile profileGenericFunc("GenericFunc", false);
if (getVerbosity() >= QUDA_SUMMARIZE) profileGenericFunc.TPSTART(QUDA_PROFILE_COMPUTE);
KernelArg<Gauge> arg(dataOr, data);
CalcFunc<Float, Gauge, NCOLORS, functiontype> func(arg);
func.apply(0);
if(getVerbosity() >= QUDA_SUMMARIZE && functiontype == 0) printfQuda("Determinant: %.16e, %.16e\n", arg.getValue().x, arg.getValue().y);
if(getVerbosity() >= QUDA_SUMMARIZE && functiontype == 1) printfQuda("Trace: %.16e, %.16e\n", arg.getValue().x, arg.getValue().y);
checkCudaError();
qudaDeviceSynchronize();
if (getVerbosity() >= QUDA_SUMMARIZE){
profileGenericFunc.TPSTOP(QUDA_PROFILE_COMPUTE);
double secs = profileGenericFunc.Last(QUDA_PROFILE_COMPUTE);
double gflops = (func.flops()*1e-9)/(secs);
double gbytes = func.bytes()/(secs*1e9);
if(functiontype == 0){
#ifdef MULTI_GPU
printfQuda("Determinant: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops*comm_size(), gbytes*comm_size());
#else
printfQuda("Determinant: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
#endif
}
if(functiontype == 1){
#ifdef MULTI_GPU
printfQuda("Trace: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops*comm_size(), gbytes*comm_size());
#else
printfQuda("Trace: Time = %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
#endif
}
}
return arg.getValue();
}
template<typename Float, int functiontype>
double2 computeValue(cudaGaugeField& data) {
double2 rtn = make_double2(0.0,0.0);
// Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12
if(data.isNative()) {
if(data.Reconstruct() == QUDA_RECONSTRUCT_NO) {
//printfQuda("QUDA_RECONSTRUCT_NO\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
rtn = computeValue<Float, 3, functiontype>(Gauge(data), data);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_12){
//printfQuda("QUDA_RECONSTRUCT_12\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
rtn = computeValue<Float, 3, functiontype>(Gauge(data), data);
} else if(data.Reconstruct() == QUDA_RECONSTRUCT_8){
//printfQuda("QUDA_RECONSTRUCT_8\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
rtn = computeValue<Float, 3, functiontype>(Gauge(data), data);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
return rtn;
}
#endif // GPU_GAUGE_ALG
/** @brief Calculate the Determinant
*
* @param[in] data Gauge field
* @returns double2 complex Determinant value
*/
double2 getLinkDeterminant( cudaGaugeField& data) {
double2 det = make_double2(0.0,0.0);
#ifdef GPU_GAUGE_ALG
if (data.Precision() == QUDA_SINGLE_PRECISION) {
det = computeValue<float, 0> (data);
} else if(data.Precision() == QUDA_DOUBLE_PRECISION) {
det = computeValue<double, 0>(data);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Pure gauge code has not been built");
#endif // GPU_GAUGE_ALG
return det;
}
/** @brief Calculate the Trace
*
* @param[in] data Gauge field
* @returns double2 complex trace value
*/
double2 getLinkTrace( cudaGaugeField& data) {
double2 det = make_double2(0.0,0.0);
#ifdef GPU_GAUGE_ALG
if (data.Precision() == QUDA_SINGLE_PRECISION) {
det = computeValue<float, 1> (data);
} else if(data.Precision() == QUDA_DOUBLE_PRECISION) {
det = computeValue<double, 1>(data);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Pure gauge code has not been built");
#endif // GPU_GAUGE_ALG
return det;
}
} // namespace quda
|
cdd8fc8358a084f184a75bbe21a37e97e344fd70.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
//function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
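// launched below with a single block of a single thread, so this one GPU thread walks the whole array serially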
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
//int N = 1<<20; //1M elements
int N = 100; //100 elements
//Allocate Unified Memory -- accessible from CPU or GPU
float *x, *y;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
//initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run the kernel on the GPU with a single block of a single thread
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, N, x, y);
//Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
|
cdd8fc8358a084f184a75bbe21a37e97e344fd70.cu
|
#include <iostream>
#include <math.h>
//function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
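// launched below with <<<1,1>>>, so this one GPU thread walks the whole array serially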
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
//int N = 1<<20; //1M elements
int N = 100; //100 elements
//Allocate Unified Memory -- accessible from CPU or GPU
float *x, *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
//initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run the kernel on the GPU with a single block of a single thread
add<<<1,1>>>(N, x, y);
//Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
vector_reduction.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <time.h>
__global__ void vectorReduce(const float *global_input_data, float *global_output_data, int numElements)
{
__shared__ float sdata[1024];
__shared__ int sindice[1024];
int tid = threadIdx.x;
int i = blockIdx.x * (blockDim.x ) + threadIdx.x;
sdata[tid] = global_input_data[i];
sindice[tid] = tid;
__syncthreads();
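// tree reduction in shared memory: each step halves the stride and keeps the smaller value
// together with its original index, leaving the block-local minimum in sdata[0]/sindice[0].
// Note that every block writes the same two output slots below, so with more than one block
// the surviving result is that of whichever block writes last.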
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) {
        if (sdata[tid] > sdata[tid + s]) {
            sdata[tid] = sdata[tid + s];
            sindice[tid] = sindice[tid + s];
        }
    }
    __syncthreads(); // must be reached by every thread of the block, so it cannot sit inside the divergent branch
}
__syncthreads();
if (tid == 0) {
global_output_data[0] = sdata[0];
}
if (tid == 1) {
global_output_data[1] = sindice[0];
}
}
/// Function Prototypes
//Get CUDA Platform Info
void get_CUDAinfo();
//Check CUDA Errors
bool check (hipError_t error );
//Initialize the host input vectors (declared here because main() calls it before its definition)
bool init_vectors_CPU (float *arr_A, float *arr_B, int elements);
/// Host main routine
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
get_CUDAinfo();
clock_t start, end;
double time_gpu;
// Print the vector length to be used, and compute its size
int numElements = 1024;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
//Vectors on RAM
float *h_A = (float *)malloc(size);
float *h_B = (float *)malloc(size); // second input array required by init_vectors_CPU
float *h_C = (float *)malloc(size);
if (!init_vectors_CPU(h_A, h_B, numElements)) printf("Failed to init vectors!\n");
//Vectors on GPU Memory
float *d_A = NULL;
float *d_C = NULL;
err = hipMalloc((void **)&d_A, size);
if (!check(err))
{
    fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
return 0;
}
err = hipMalloc((void **)&d_C, size);
if (!check(err))
{
    fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
return 0;
}
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (!check(err))
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
return 0;
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 16;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
start = clock();
hipLaunchKernelGGL(( vectorReduce), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_C, numElements);
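// NOTE: the launch above returns immediately (kernel launches are asynchronous), so without a
// hipDeviceSynchronize() before end = clock() this interval mostly measures launch overhead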
end = clock();
time_gpu= (double ) (end - start) / CLOCKS_PER_SEC * 1000;
err = hipGetLastError();
if (!check(err))
{
    fprintf(stderr, "Failed to launch vectorReduce kernel (error code %s)!\n", hipGetErrorString(err));
return 0;
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (!check(err))
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
return 0;
}
printf("Time GPU: %lf\n", time_gpu);
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
return 0;
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
return 0;
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
/// Functions
void get_CUDAinfo (){
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
bool init_vectors_CPU (float *arr_A,float *arr_B,int elements){
// Verify that allocations succeeded
if (arr_A == NULL || arr_B == NULL )
{
fprintf(stderr, "Failed to allocate host vectors!\n");
return 0;
}
// Initialize the host input vectors
for (int i = 0; i < elements; ++i)
{
arr_A[i] = rand()/(float)RAND_MAX;
arr_B[i] = rand()/(float)RAND_MAX;
}
return 1;
}
bool pedir_memoriaGPU(float *arr_A,float *arr_B,float *arr_C,size_t d_size){
// Allocate the device input vector A
hipError_t err;
err = hipMalloc((void **) &arr_A, d_size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
return 0;
}
// Allocate the device input vector B
err = hipMalloc((void **)&arr_B, d_size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
return 0;
}
// Allocate the device output vector C
err = hipMalloc((void **)&arr_C, d_size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
return 0;
}
return 1;
}
bool add_cpu(float *arr_A,float *arr_B,float *arr_C,int elements){
for (int i = 0; i < elements; ++i)
{
arr_C[i]= arr_A[i] + arr_B[i];
}
return 1;
}
bool check_addition(float *arr_A,float *arr_B,float *arr_C,int elements){
for (int i = 0; i < elements; ++i)
{
if (fabs(arr_A[i] + arr_B[i] - arr_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
return 0;
}
}
return 1;
}
bool check (hipError_t error ){
if (error != hipSuccess) return 0;
//printf ("Error checkeado\n");
return 1;
}
|
vector_reduction.cu
|
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <time.h>
__global__ void vectorReduce(const float *global_input_data, float *global_output_data, int numElements)
{
__shared__ float sdata[1024];
__shared__ int sindice[1024];
int tid = threadIdx.x;
int i = blockIdx.x * (blockDim.x ) + threadIdx.x;
sdata[tid] = global_input_data[i];
sindice[tid] = tid;
__syncthreads();
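// tree reduction in shared memory: each step halves the stride and keeps the smaller value
// together with its original index, leaving the block-local minimum in sdata[0]/sindice[0].
// Note that every block writes the same two output slots below, so with more than one block
// the surviving result is that of whichever block writes last.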
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) {
        if (sdata[tid] > sdata[tid + s]) {
            sdata[tid] = sdata[tid + s];
            sindice[tid] = sindice[tid + s];
        }
    }
    __syncthreads(); // must be reached by every thread of the block, so it cannot sit inside the divergent branch
}
__syncthreads();
if (tid == 0) {
global_output_data[0] = sdata[0];
}
if (tid == 1) {
global_output_data[1] = sindice[0];
}
}
/// Function Prototypes
//Get CUDA Platform Info
void get_CUDAinfo();
//Check CUDA Errors
bool check (cudaError_t error );
//Initialize the host input vectors (declared here because main() calls it before its definition)
bool init_vectors_CPU (float *arr_A, float *arr_B, int elements);
/// Host main routine
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
get_CUDAinfo();
clock_t start, end;
double time_gpu;
// Print the vector length to be used, and compute its size
int numElements = 1024;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
//Vectors on RAM
float *h_A = (float *)malloc(size);
float *h_B = (float *)malloc(size); // second input array required by init_vectors_CPU
float *h_C = (float *)malloc(size);
if (!init_vectors_CPU(h_A, h_B, numElements)) printf("Failed to init vectors!\n");
//Vectors on GPU Memory
float *d_A = NULL;
float *d_C = NULL;
err = cudaMalloc((void **)&d_A, size);
if (!check(err))
{
    fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
return 0;
}
err = cudaMalloc((void **)&d_C, size);
if (!check(err))
{
    fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
return 0;
}
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (!check(err))
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
return 0;
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 16;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
start = clock();
vectorReduce<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, numElements);
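// NOTE: the launch above returns immediately (kernel launches are asynchronous), so without a
// cudaDeviceSynchronize() before end = clock() this interval mostly measures launch overhead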
end = clock();
time_gpu= (double ) (end - start) / CLOCKS_PER_SEC * 1000;
err = cudaGetLastError();
if (!check(err))
{
    fprintf(stderr, "Failed to launch vectorReduce kernel (error code %s)!\n", cudaGetErrorString(err));
return 0;
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (!check(err))
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
return 0;
}
printf("Time GPU: %lf\n", time_gpu);
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
return 0;
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
return 0;
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
/// Functions
void get_CUDAinfo (){
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
bool init_vectors_CPU (float *arr_A,float *arr_B,int elements){
// Verify that allocations succeeded
if (arr_A == NULL || arr_B == NULL )
{
fprintf(stderr, "Failed to allocate host vectors!\n");
return 0;
}
// Initialize the host input vectors
for (int i = 0; i < elements; ++i)
{
arr_A[i] = rand()/(float)RAND_MAX;
arr_B[i] = rand()/(float)RAND_MAX;
}
return 1;
}
bool pedir_memoriaGPU(float *arr_A,float *arr_B,float *arr_C,size_t d_size){
// Allocate the device input vector A
cudaError_t err;
err = cudaMalloc((void **) &arr_A, d_size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
return 0;
}
// Allocate the device input vector B
err = cudaMalloc((void **)&arr_B, d_size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
return 0;
}
// Allocate the device output vector C
err = cudaMalloc((void **)&arr_C, d_size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
bool add_cpu(float *arr_A,float *arr_B,float *arr_C,int elements){
for (int i = 0; i < elements; ++i)
{
arr_C[i]= arr_A[i] + arr_B[i];
}
return 1;
}
bool check_addition(float *arr_A,float *arr_B,float *arr_C,int elements){
for (int i = 0; i < elements; ++i)
{
if (fabs(arr_A[i] + arr_B[i] - arr_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
return 0;
}
}
return 1;
}
bool check (cudaError_t error ){
if (error != cudaSuccess) return 0;
//printf ("Error checkeado\n");
return 1;
}
|
bc2fe6365125dd6f730734d4f50978b1595cf93e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
__global__ void
dgemm_kernel_a_0(double *C, const double *A, const double *B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta)
{
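// alpha == 0 path of DGEMM: A, B and alpha are unused and C is simply scaled by beta.
// 'lda' is reused below as the number of valid columns of this 16-wide tile (0 when the
// thread's row falls outside the matrix), which the switch statement dispatches on.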
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y *16;
const int idt = ty * 16 + tx;
C += ibx +idt +__mul24(iby,ldc);
ibx = ibx+idt - m ;
if( (iby+16)>=n) {
lda = n-iby;
}
else {
lda = 16;
}
if( ibx >= 0 )
lda = 0 ;
else lda = lda ;
switch(lda){
case 16:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
C[11*ldc] *=beta;
C[12*ldc] *=beta;
C[13*ldc] *=beta;
C[14*ldc] *=beta;
C[15*ldc] *=beta;
break;
case 0:
break;
case 15:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
C[11*ldc] *=beta;
C[12*ldc] *=beta;
C[13*ldc] *=beta;
C[14*ldc] *=beta;
break;
case 14:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
C[11*ldc] *=beta;
C[12*ldc] *=beta;
C[13*ldc] *=beta;
break;
case 13:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
C[11*ldc] *=beta;
C[12*ldc] *=beta;
break;
case 12:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
C[11*ldc] *=beta;
break;
case 11:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
break;
case 10:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
break;
case 9:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
break;
case 8:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
break;
case 7:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
break;
case 6:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
break;
case 5:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
break;
case 4:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
break;
case 3:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
break;
case 2:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
break;
case 1:
C[ 0 ] *=beta;
break;
}
}
extern "C" void
magmablas_dgemm_kernel_a_0(double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta)
{
dim3 threads( 16, 4 );
dim3 grid(m/64+(m%64!=0),n/16+(n%16!=0));
hipLaunchKernelGGL(( dgemm_kernel_a_0), dim3(grid), dim3(threads), 0, magma_stream , C, A, B,
m, n, k,
lda, ldb, ldc,
alpha, beta ) ;
}
|
bc2fe6365125dd6f730734d4f50978b1595cf93e.cu
|
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
__global__ void
dgemm_kernel_a_0(double *C, const double *A, const double *B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta)
{
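// alpha == 0 path of DGEMM: A, B and alpha are unused and C is simply scaled by beta.
// 'lda' is reused below as the number of valid columns of this 16-wide tile (0 when the
// thread's row falls outside the matrix), which the switch statement dispatches on.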
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y *16;
const int idt = ty * 16 + tx;
C += ibx +idt +__mul24(iby,ldc);
ibx = ibx+idt - m ;
if( (iby+16)>=n) {
lda = n-iby;
}
else {
lda = 16;
}
if( ibx >= 0 )
lda = 0 ;
else lda = lda ;
switch(lda){
case 16:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
C[11*ldc] *=beta;
C[12*ldc] *=beta;
C[13*ldc] *=beta;
C[14*ldc] *=beta;
C[15*ldc] *=beta;
break;
case 0:
break;
case 15:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
C[11*ldc] *=beta;
C[12*ldc] *=beta;
C[13*ldc] *=beta;
C[14*ldc] *=beta;
break;
case 14:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
C[11*ldc] *=beta;
C[12*ldc] *=beta;
C[13*ldc] *=beta;
break;
case 13:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
C[11*ldc] *=beta;
C[12*ldc] *=beta;
break;
case 12:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
C[11*ldc] *=beta;
break;
case 11:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
C[10*ldc] *=beta;
break;
case 10:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
C[ 9*ldc] *=beta;
break;
case 9:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
C[ 8*ldc] *=beta;
break;
case 8:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
C[ 7*ldc] *=beta;
break;
case 7:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
C[ 6*ldc] *=beta;
break;
case 6:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
C[ 5*ldc] *=beta;
break;
case 5:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
C[ 4*ldc] *=beta;
break;
case 4:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
C[ 3*ldc] *=beta;
break;
case 3:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
C[ 2*ldc] *=beta;
break;
case 2:
C[ 0 ] *=beta;
C[ 1*ldc] *=beta;
break;
case 1:
C[ 0 ] *=beta;
break;
}
}
extern "C" void
magmablas_dgemm_kernel_a_0(double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta)
{
dim3 threads( 16, 4 );
dim3 grid(m/64+(m%64!=0),n/16+(n%16!=0));
dgemm_kernel_a_0<<< grid, threads, 0, magma_stream >>>(C, A, B,
m, n, k,
lda, ldb, ldc,
alpha, beta ) ;
}
|
ffec13857a8e505a9c713245629832cb2e39f5da.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_PdV_kernel_nopredict [17][2];
static int dims_PdV_kernel_nopredict_h [17][2] = {0};
//user function
__device__
void PdV_kernel_nopredict_gpu(const ACC<double> &xarea,
const ACC<double> &xvel0,
const ACC<double> &xvel1,
const ACC<double> &yarea,
const ACC<double> &yvel0,
const ACC<double> &yvel1,
ACC<double> &volume_change,
const ACC<double> &volume,
const ACC<double> &pressure,
const ACC<double> &density0,
ACC<double> &density1,
const ACC<double> &viscosity,
const ACC<double> &energy0,
ACC<double> &energy1,
const ACC<double> &zarea,
const ACC<double> &zvel0,
const ACC<double> &zvel1) {
double recip_volume, energy_change;
double right_flux, left_flux, top_flux, bottom_flux, back_flux, front_flux, total_flux;
left_flux = ( xarea(0,0,0) * ( xvel0(0,0,0) + xvel0(0,1,0) +
xvel0(0,0,1) + xvel0(0,1,1) +
xvel1(0,0,0) + xvel1(0,1,0) +
xvel1(0,0,1) + xvel1(0,1,1) ) ) * 0.125 * dt;
right_flux = ( xarea(1,0,0) * ( xvel0(1,0,0) + xvel0(1,1,0) +
xvel0(1,0,1) + xvel0(1,1,1) +
xvel1(1,0,0) + xvel1(1,1,0) +
xvel1(1,0,1) + xvel1(1,1,1) ) ) * 0.125 * dt;
bottom_flux = ( yarea(0,0,0) * ( yvel0(0,0,0) + yvel0(1,0,0) +
yvel0(0,0,1) + yvel0(1,0,1) +
yvel1(0,0,0) + yvel1(1,0,0) +
yvel1(0,0,1) + yvel1(1,0,1) ) ) * 0.125* dt;
top_flux = ( yarea(0,1,0) * ( yvel0(0,1,0) + yvel0(1,1,0) +
yvel0(0,1,1) + yvel0(1,1,1) +
yvel1(0,1,0) + yvel1(1,1,0) +
yvel1(0,1,1) + yvel1(1,1,1)) ) * 0.125 * dt;
back_flux = ( zarea(0,0,0) * ( zvel0(0,0,0) + zvel0(1,0,0) +
zvel0(0,1,0) + zvel0(1,1,0) +
zvel1(0,0,0) + zvel1(1,0,0) +
zvel1(0,1,0) + zvel1(1,1,0) ) ) * 0.125* dt;
front_flux = ( zarea(0,0,1) * ( zvel0(0,0,1) + zvel0(1,0,1) +
zvel0(0,1,1) + zvel0(1,1,1) +
zvel1(0,0,1) + zvel1(1,0,1) +
zvel1(0,1,1) + zvel1(1,1,1)) ) * 0.125 * dt;
total_flux = right_flux - left_flux + top_flux - bottom_flux + front_flux - back_flux;
volume_change(0,0,0) = (volume(0,0,0))/(volume(0,0,0) + total_flux);
recip_volume = 1.0/volume(0,0,0);
energy_change = ( pressure(0,0,0)/density0(0,0,0) +
viscosity(0,0,0)/density0(0,0,0) ) * total_flux * recip_volume;
energy1(0,0,0) = energy0(0,0,0) - energy_change;
density1(0,0,0) = density0(0,0,0) * volume_change(0,0,0);
}
__global__ void ops_PdV_kernel_nopredict(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
double* __restrict arg10,
double* __restrict arg11,
double* __restrict arg12,
double* __restrict arg13,
double* __restrict arg14,
double* __restrict arg15,
double* __restrict arg16,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[0][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[0][0] * dims_PdV_kernel_nopredict[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[1][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[1][0] * dims_PdV_kernel_nopredict[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[2][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[2][0] * dims_PdV_kernel_nopredict[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[3][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[3][0] * dims_PdV_kernel_nopredict[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[4][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[4][0] * dims_PdV_kernel_nopredict[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[5][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[5][0] * dims_PdV_kernel_nopredict[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[6][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[6][0] * dims_PdV_kernel_nopredict[6][1];
arg7 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[7][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[7][0] * dims_PdV_kernel_nopredict[7][1];
arg8 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[8][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[8][0] * dims_PdV_kernel_nopredict[8][1];
arg9 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[9][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[9][0] * dims_PdV_kernel_nopredict[9][1];
arg10 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[10][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[10][0] * dims_PdV_kernel_nopredict[10][1];
arg11 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[11][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[11][0] * dims_PdV_kernel_nopredict[11][1];
arg12 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[12][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[12][0] * dims_PdV_kernel_nopredict[12][1];
arg13 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[13][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[13][0] * dims_PdV_kernel_nopredict[13][1];
arg14 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[14][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[14][0] * dims_PdV_kernel_nopredict[14][1];
arg15 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[15][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[15][0] * dims_PdV_kernel_nopredict[15][1];
arg16 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[16][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[16][0] * dims_PdV_kernel_nopredict[16][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
const ACC<double> argp0(dims_PdV_kernel_nopredict[0][0], dims_PdV_kernel_nopredict[0][1], arg0);
const ACC<double> argp1(dims_PdV_kernel_nopredict[1][0], dims_PdV_kernel_nopredict[1][1], arg1);
const ACC<double> argp2(dims_PdV_kernel_nopredict[2][0], dims_PdV_kernel_nopredict[2][1], arg2);
const ACC<double> argp3(dims_PdV_kernel_nopredict[3][0], dims_PdV_kernel_nopredict[3][1], arg3);
const ACC<double> argp4(dims_PdV_kernel_nopredict[4][0], dims_PdV_kernel_nopredict[4][1], arg4);
const ACC<double> argp5(dims_PdV_kernel_nopredict[5][0], dims_PdV_kernel_nopredict[5][1], arg5);
ACC<double> argp6(dims_PdV_kernel_nopredict[6][0], dims_PdV_kernel_nopredict[6][1], arg6);
const ACC<double> argp7(dims_PdV_kernel_nopredict[7][0], dims_PdV_kernel_nopredict[7][1], arg7);
const ACC<double> argp8(dims_PdV_kernel_nopredict[8][0], dims_PdV_kernel_nopredict[8][1], arg8);
const ACC<double> argp9(dims_PdV_kernel_nopredict[9][0], dims_PdV_kernel_nopredict[9][1], arg9);
ACC<double> argp10(dims_PdV_kernel_nopredict[10][0], dims_PdV_kernel_nopredict[10][1], arg10);
const ACC<double> argp11(dims_PdV_kernel_nopredict[11][0], dims_PdV_kernel_nopredict[11][1], arg11);
const ACC<double> argp12(dims_PdV_kernel_nopredict[12][0], dims_PdV_kernel_nopredict[12][1], arg12);
ACC<double> argp13(dims_PdV_kernel_nopredict[13][0], dims_PdV_kernel_nopredict[13][1], arg13);
const ACC<double> argp14(dims_PdV_kernel_nopredict[14][0], dims_PdV_kernel_nopredict[14][1], arg14);
const ACC<double> argp15(dims_PdV_kernel_nopredict[15][0], dims_PdV_kernel_nopredict[15][1], arg15);
const ACC<double> argp16(dims_PdV_kernel_nopredict[16][0], dims_PdV_kernel_nopredict[16][1], arg16);
PdV_kernel_nopredict_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, argp7, argp8,
argp9, argp10, argp11, argp12, argp13,
argp14, argp15, argp16);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_PdV_kernel_nopredict(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13,
ops_arg arg14, ops_arg arg15, ops_arg arg16) {
#else
void ops_par_loop_PdV_kernel_nopredict_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
ops_arg arg11 = desc->args[11];
ops_arg arg12 = desc->args[12];
ops_arg arg13 = desc->args[13];
ops_arg arg14 = desc->args[14];
ops_arg arg15 = desc->args[15];
ops_arg arg16 = desc->args[16];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[17] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,17,range,102)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(102,"PdV_kernel_nopredict");
OPS_kernels[102].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 17,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
int xdim11 = args[11].dat->size[0];
int ydim11 = args[11].dat->size[1];
int xdim12 = args[12].dat->size[0];
int ydim12 = args[12].dat->size[1];
int xdim13 = args[13].dat->size[0];
int ydim13 = args[13].dat->size[1];
int xdim14 = args[14].dat->size[0];
int ydim14 = args[14].dat->size[1];
int xdim15 = args[15].dat->size[0];
int ydim15 = args[15].dat->size[1];
int xdim16 = args[16].dat->size[0];
int ydim16 = args[16].dat->size[1];
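//refresh the constant-memory copy of the dat dimensions only when any of them has changed since the last call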
if (xdim0 != dims_PdV_kernel_nopredict_h[0][0] || ydim0 != dims_PdV_kernel_nopredict_h[0][1] || xdim1 != dims_PdV_kernel_nopredict_h[1][0] || ydim1 != dims_PdV_kernel_nopredict_h[1][1] || xdim2 != dims_PdV_kernel_nopredict_h[2][0] || ydim2 != dims_PdV_kernel_nopredict_h[2][1] || xdim3 != dims_PdV_kernel_nopredict_h[3][0] || ydim3 != dims_PdV_kernel_nopredict_h[3][1] || xdim4 != dims_PdV_kernel_nopredict_h[4][0] || ydim4 != dims_PdV_kernel_nopredict_h[4][1] || xdim5 != dims_PdV_kernel_nopredict_h[5][0] || ydim5 != dims_PdV_kernel_nopredict_h[5][1] || xdim6 != dims_PdV_kernel_nopredict_h[6][0] || ydim6 != dims_PdV_kernel_nopredict_h[6][1] || xdim7 != dims_PdV_kernel_nopredict_h[7][0] || ydim7 != dims_PdV_kernel_nopredict_h[7][1] || xdim8 != dims_PdV_kernel_nopredict_h[8][0] || ydim8 != dims_PdV_kernel_nopredict_h[8][1] || xdim9 != dims_PdV_kernel_nopredict_h[9][0] || ydim9 != dims_PdV_kernel_nopredict_h[9][1] || xdim10 != dims_PdV_kernel_nopredict_h[10][0] || ydim10 != dims_PdV_kernel_nopredict_h[10][1] || xdim11 != dims_PdV_kernel_nopredict_h[11][0] || ydim11 != dims_PdV_kernel_nopredict_h[11][1] || xdim12 != dims_PdV_kernel_nopredict_h[12][0] || ydim12 != dims_PdV_kernel_nopredict_h[12][1] || xdim13 != dims_PdV_kernel_nopredict_h[13][0] || ydim13 != dims_PdV_kernel_nopredict_h[13][1] || xdim14 != dims_PdV_kernel_nopredict_h[14][0] || ydim14 != dims_PdV_kernel_nopredict_h[14][1] || xdim15 != dims_PdV_kernel_nopredict_h[15][0] || ydim15 != dims_PdV_kernel_nopredict_h[15][1] || xdim16 != dims_PdV_kernel_nopredict_h[16][0] || ydim16 != dims_PdV_kernel_nopredict_h[16][1]) {
dims_PdV_kernel_nopredict_h[0][0] = xdim0;
dims_PdV_kernel_nopredict_h[0][1] = ydim0;
dims_PdV_kernel_nopredict_h[1][0] = xdim1;
dims_PdV_kernel_nopredict_h[1][1] = ydim1;
dims_PdV_kernel_nopredict_h[2][0] = xdim2;
dims_PdV_kernel_nopredict_h[2][1] = ydim2;
dims_PdV_kernel_nopredict_h[3][0] = xdim3;
dims_PdV_kernel_nopredict_h[3][1] = ydim3;
dims_PdV_kernel_nopredict_h[4][0] = xdim4;
dims_PdV_kernel_nopredict_h[4][1] = ydim4;
dims_PdV_kernel_nopredict_h[5][0] = xdim5;
dims_PdV_kernel_nopredict_h[5][1] = ydim5;
dims_PdV_kernel_nopredict_h[6][0] = xdim6;
dims_PdV_kernel_nopredict_h[6][1] = ydim6;
dims_PdV_kernel_nopredict_h[7][0] = xdim7;
dims_PdV_kernel_nopredict_h[7][1] = ydim7;
dims_PdV_kernel_nopredict_h[8][0] = xdim8;
dims_PdV_kernel_nopredict_h[8][1] = ydim8;
dims_PdV_kernel_nopredict_h[9][0] = xdim9;
dims_PdV_kernel_nopredict_h[9][1] = ydim9;
dims_PdV_kernel_nopredict_h[10][0] = xdim10;
dims_PdV_kernel_nopredict_h[10][1] = ydim10;
dims_PdV_kernel_nopredict_h[11][0] = xdim11;
dims_PdV_kernel_nopredict_h[11][1] = ydim11;
dims_PdV_kernel_nopredict_h[12][0] = xdim12;
dims_PdV_kernel_nopredict_h[12][1] = ydim12;
dims_PdV_kernel_nopredict_h[13][0] = xdim13;
dims_PdV_kernel_nopredict_h[13][1] = ydim13;
dims_PdV_kernel_nopredict_h[14][0] = xdim14;
dims_PdV_kernel_nopredict_h[14][1] = ydim14;
dims_PdV_kernel_nopredict_h[15][0] = xdim15;
dims_PdV_kernel_nopredict_h[15][1] = ydim15;
dims_PdV_kernel_nopredict_h[16][0] = xdim16;
dims_PdV_kernel_nopredict_h[16][1] = ydim16;
cutilSafeCall(hipMemcpyToSymbol( dims_PdV_kernel_nopredict, dims_PdV_kernel_nopredict_h, sizeof(dims_PdV_kernel_nopredict)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
int dat11 = (OPS_soa ? args[11].dat->type_size : args[11].dat->elem_size);
int dat12 = (OPS_soa ? args[12].dat->type_size : args[12].dat->elem_size);
int dat13 = (OPS_soa ? args[13].dat->type_size : args[13].dat->elem_size);
int dat14 = (OPS_soa ? args[14].dat->type_size : args[14].dat->elem_size);
int dat15 = (OPS_soa ? args[15].dat->type_size : args[15].dat->elem_size);
int dat16 = (OPS_soa ? args[16].dat->type_size : args[16].dat->elem_size);
char *p_a[17];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
(start[1] * args[8].stencil->stride[1]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
(start[1] * args[9].stencil->stride[1]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
(start[1] * args[10].stencil->stride[1]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
int base11 = args[11].dat->base_offset +
dat11 * 1 * (start[0] * args[11].stencil->stride[0]);
base11 = base11+ dat11 *
args[11].dat->size[0] *
(start[1] * args[11].stencil->stride[1]);
base11 = base11+ dat11 *
args[11].dat->size[0] *
args[11].dat->size[1] *
(start[2] * args[11].stencil->stride[2]);
p_a[11] = (char *)args[11].data_d + base11;
int base12 = args[12].dat->base_offset +
dat12 * 1 * (start[0] * args[12].stencil->stride[0]);
base12 = base12+ dat12 *
args[12].dat->size[0] *
(start[1] * args[12].stencil->stride[1]);
base12 = base12+ dat12 *
args[12].dat->size[0] *
args[12].dat->size[1] *
(start[2] * args[12].stencil->stride[2]);
p_a[12] = (char *)args[12].data_d + base12;
int base13 = args[13].dat->base_offset +
dat13 * 1 * (start[0] * args[13].stencil->stride[0]);
base13 = base13+ dat13 *
args[13].dat->size[0] *
(start[1] * args[13].stencil->stride[1]);
base13 = base13+ dat13 *
args[13].dat->size[0] *
args[13].dat->size[1] *
(start[2] * args[13].stencil->stride[2]);
p_a[13] = (char *)args[13].data_d + base13;
int base14 = args[14].dat->base_offset +
dat14 * 1 * (start[0] * args[14].stencil->stride[0]);
base14 = base14+ dat14 *
args[14].dat->size[0] *
(start[1] * args[14].stencil->stride[1]);
base14 = base14+ dat14 *
args[14].dat->size[0] *
args[14].dat->size[1] *
(start[2] * args[14].stencil->stride[2]);
p_a[14] = (char *)args[14].data_d + base14;
int base15 = args[15].dat->base_offset +
dat15 * 1 * (start[0] * args[15].stencil->stride[0]);
base15 = base15+ dat15 *
args[15].dat->size[0] *
(start[1] * args[15].stencil->stride[1]);
base15 = base15+ dat15 *
args[15].dat->size[0] *
args[15].dat->size[1] *
(start[2] * args[15].stencil->stride[2]);
p_a[15] = (char *)args[15].data_d + base15;
int base16 = args[16].dat->base_offset +
dat16 * 1 * (start[0] * args[16].stencil->stride[0]);
base16 = base16+ dat16 *
args[16].dat->size[0] *
(start[1] * args[16].stencil->stride[1]);
base16 = base16+ dat16 *
args[16].dat->size[0] *
args[16].dat->size[1] *
(start[2] * args[16].stencil->stride[2]);
p_a[16] = (char *)args[16].data_d + base16;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 17);
ops_halo_exchanges(args,17,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[102].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_PdV_kernel_nopredict), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9],
(double *)p_a[10], (double *)p_a[11],
(double *)p_a[12], (double *)p_a[13],
(double *)p_a[14], (double *)p_a[15],
(double *)p_a[16],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[102].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 17);
ops_set_halo_dirtybit3(&args[6],range);
ops_set_halo_dirtybit3(&args[10],range);
ops_set_halo_dirtybit3(&args[13],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[102].mpi_time += t2-t1;
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg10);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg11);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg12);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg13);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg14);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg15);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg16);
}
}
#ifdef OPS_LAZY
void ops_par_loop_PdV_kernel_nopredict(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13, ops_arg arg14, ops_arg arg15, ops_arg arg16) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 102;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 102;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 17;
desc->args = (ops_arg*)malloc(17*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
desc->args[11] = arg11;
desc->hash = ((desc->hash << 5) + desc->hash) + arg11.dat->index;
desc->args[12] = arg12;
desc->hash = ((desc->hash << 5) + desc->hash) + arg12.dat->index;
desc->args[13] = arg13;
desc->hash = ((desc->hash << 5) + desc->hash) + arg13.dat->index;
desc->args[14] = arg14;
desc->hash = ((desc->hash << 5) + desc->hash) + arg14.dat->index;
desc->args[15] = arg15;
desc->hash = ((desc->hash << 5) + desc->hash) + arg15.dat->index;
desc->args[16] = arg16;
desc->hash = ((desc->hash << 5) + desc->hash) + arg16.dat->index;
desc->function = ops_par_loop_PdV_kernel_nopredict_execute;
if (OPS_diags > 1) {
ops_timing_realloc(102,"PdV_kernel_nopredict");
}
ops_enqueue_kernel(desc);
}
#endif
|
ffec13857a8e505a9c713245629832cb2e39f5da.cu
|
//
// auto-generated by ops.py
//
__constant__ int dims_PdV_kernel_nopredict [17][2];
static int dims_PdV_kernel_nopredict_h [17][2] = {0};
//user function
__device__
void PdV_kernel_nopredict_gpu(const ACC<double> &xarea,
const ACC<double> &xvel0,
const ACC<double> &xvel1,
const ACC<double> &yarea,
const ACC<double> &yvel0,
const ACC<double> &yvel1,
ACC<double> &volume_change,
const ACC<double> &volume,
const ACC<double> &pressure,
const ACC<double> &density0,
ACC<double> &density1,
const ACC<double> &viscosity,
const ACC<double> &energy0,
ACC<double> &energy1,
const ACC<double> &zarea,
const ACC<double> &zvel0,
const ACC<double> &zvel1) {
double recip_volume, energy_change;
double right_flux, left_flux, top_flux, bottom_flux, back_flux, front_flux, total_flux;
left_flux = ( xarea(0,0,0) * ( xvel0(0,0,0) + xvel0(0,1,0) +
xvel0(0,0,1) + xvel0(0,1,1) +
xvel1(0,0,0) + xvel1(0,1,0) +
xvel1(0,0,1) + xvel1(0,1,1) ) ) * 0.125 * dt;
right_flux = ( xarea(1,0,0) * ( xvel0(1,0,0) + xvel0(1,1,0) +
xvel0(1,0,1) + xvel0(1,1,1) +
xvel1(1,0,0) + xvel1(1,1,0) +
xvel1(1,0,1) + xvel1(1,1,1) ) ) * 0.125 * dt;
bottom_flux = ( yarea(0,0,0) * ( yvel0(0,0,0) + yvel0(1,0,0) +
yvel0(0,0,1) + yvel0(1,0,1) +
yvel1(0,0,0) + yvel1(1,0,0) +
yvel1(0,0,1) + yvel1(1,0,1) ) ) * 0.125* dt;
top_flux = ( yarea(0,1,0) * ( yvel0(0,1,0) + yvel0(1,1,0) +
yvel0(0,1,1) + yvel0(1,1,1) +
yvel1(0,1,0) + yvel1(1,1,0) +
yvel1(0,1,1) + yvel1(1,1,1)) ) * 0.125 * dt;
back_flux = ( zarea(0,0,0) * ( zvel0(0,0,0) + zvel0(1,0,0) +
zvel0(0,1,0) + zvel0(1,1,0) +
zvel1(0,0,0) + zvel1(1,0,0) +
zvel1(0,1,0) + zvel1(1,1,0) ) ) * 0.125* dt;
front_flux = ( zarea(0,0,1) * ( zvel0(0,0,1) + zvel0(1,0,1) +
zvel0(0,1,1) + zvel0(1,1,1) +
zvel1(0,0,1) + zvel1(1,0,1) +
zvel1(0,1,1) + zvel1(1,1,1)) ) * 0.125 * dt;
total_flux = right_flux - left_flux + top_flux - bottom_flux + front_flux - back_flux;
volume_change(0,0,0) = (volume(0,0,0))/(volume(0,0,0) + total_flux);
recip_volume = 1.0/volume(0,0,0);
energy_change = ( pressure(0,0,0)/density0(0,0,0) +
viscosity(0,0,0)/density0(0,0,0) ) * total_flux * recip_volume;
energy1(0,0,0) = energy0(0,0,0) - energy_change;
density1(0,0,0) = density0(0,0,0) * volume_change(0,0,0);
}
__global__ void ops_PdV_kernel_nopredict(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
double* __restrict arg10,
double* __restrict arg11,
double* __restrict arg12,
double* __restrict arg13,
double* __restrict arg14,
double* __restrict arg15,
double* __restrict arg16,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[0][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[0][0] * dims_PdV_kernel_nopredict[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[1][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[1][0] * dims_PdV_kernel_nopredict[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[2][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[2][0] * dims_PdV_kernel_nopredict[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[3][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[3][0] * dims_PdV_kernel_nopredict[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[4][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[4][0] * dims_PdV_kernel_nopredict[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[5][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[5][0] * dims_PdV_kernel_nopredict[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[6][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[6][0] * dims_PdV_kernel_nopredict[6][1];
arg7 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[7][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[7][0] * dims_PdV_kernel_nopredict[7][1];
arg8 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[8][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[8][0] * dims_PdV_kernel_nopredict[8][1];
arg9 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[9][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[9][0] * dims_PdV_kernel_nopredict[9][1];
arg10 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[10][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[10][0] * dims_PdV_kernel_nopredict[10][1];
arg11 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[11][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[11][0] * dims_PdV_kernel_nopredict[11][1];
arg12 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[12][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[12][0] * dims_PdV_kernel_nopredict[12][1];
arg13 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[13][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[13][0] * dims_PdV_kernel_nopredict[13][1];
arg14 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[14][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[14][0] * dims_PdV_kernel_nopredict[14][1];
arg15 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[15][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[15][0] * dims_PdV_kernel_nopredict[15][1];
arg16 += idx_x * 1*1 + idx_y * 1*1 * dims_PdV_kernel_nopredict[16][0] + idx_z * 1*1 * dims_PdV_kernel_nopredict[16][0] * dims_PdV_kernel_nopredict[16][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
const ACC<double> argp0(dims_PdV_kernel_nopredict[0][0], dims_PdV_kernel_nopredict[0][1], arg0);
const ACC<double> argp1(dims_PdV_kernel_nopredict[1][0], dims_PdV_kernel_nopredict[1][1], arg1);
const ACC<double> argp2(dims_PdV_kernel_nopredict[2][0], dims_PdV_kernel_nopredict[2][1], arg2);
const ACC<double> argp3(dims_PdV_kernel_nopredict[3][0], dims_PdV_kernel_nopredict[3][1], arg3);
const ACC<double> argp4(dims_PdV_kernel_nopredict[4][0], dims_PdV_kernel_nopredict[4][1], arg4);
const ACC<double> argp5(dims_PdV_kernel_nopredict[5][0], dims_PdV_kernel_nopredict[5][1], arg5);
ACC<double> argp6(dims_PdV_kernel_nopredict[6][0], dims_PdV_kernel_nopredict[6][1], arg6);
const ACC<double> argp7(dims_PdV_kernel_nopredict[7][0], dims_PdV_kernel_nopredict[7][1], arg7);
const ACC<double> argp8(dims_PdV_kernel_nopredict[8][0], dims_PdV_kernel_nopredict[8][1], arg8);
const ACC<double> argp9(dims_PdV_kernel_nopredict[9][0], dims_PdV_kernel_nopredict[9][1], arg9);
ACC<double> argp10(dims_PdV_kernel_nopredict[10][0], dims_PdV_kernel_nopredict[10][1], arg10);
const ACC<double> argp11(dims_PdV_kernel_nopredict[11][0], dims_PdV_kernel_nopredict[11][1], arg11);
const ACC<double> argp12(dims_PdV_kernel_nopredict[12][0], dims_PdV_kernel_nopredict[12][1], arg12);
ACC<double> argp13(dims_PdV_kernel_nopredict[13][0], dims_PdV_kernel_nopredict[13][1], arg13);
const ACC<double> argp14(dims_PdV_kernel_nopredict[14][0], dims_PdV_kernel_nopredict[14][1], arg14);
const ACC<double> argp15(dims_PdV_kernel_nopredict[15][0], dims_PdV_kernel_nopredict[15][1], arg15);
const ACC<double> argp16(dims_PdV_kernel_nopredict[16][0], dims_PdV_kernel_nopredict[16][1], arg16);
PdV_kernel_nopredict_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, argp7, argp8,
argp9, argp10, argp11, argp12, argp13,
argp14, argp15, argp16);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_PdV_kernel_nopredict(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13,
ops_arg arg14, ops_arg arg15, ops_arg arg16) {
#else
void ops_par_loop_PdV_kernel_nopredict_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
ops_arg arg11 = desc->args[11];
ops_arg arg12 = desc->args[12];
ops_arg arg13 = desc->args[13];
ops_arg arg14 = desc->args[14];
ops_arg arg15 = desc->args[15];
ops_arg arg16 = desc->args[16];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[17] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,17,range,102)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(102,"PdV_kernel_nopredict");
OPS_kernels[102].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 17,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
int xdim11 = args[11].dat->size[0];
int ydim11 = args[11].dat->size[1];
int xdim12 = args[12].dat->size[0];
int ydim12 = args[12].dat->size[1];
int xdim13 = args[13].dat->size[0];
int ydim13 = args[13].dat->size[1];
int xdim14 = args[14].dat->size[0];
int ydim14 = args[14].dat->size[1];
int xdim15 = args[15].dat->size[0];
int ydim15 = args[15].dat->size[1];
int xdim16 = args[16].dat->size[0];
int ydim16 = args[16].dat->size[1];
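//update the device copy of the dataset dimensions (dims_PdV_kernel_nopredict) if any of them changed since the last call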
if (xdim0 != dims_PdV_kernel_nopredict_h[0][0] || ydim0 != dims_PdV_kernel_nopredict_h[0][1] || xdim1 != dims_PdV_kernel_nopredict_h[1][0] || ydim1 != dims_PdV_kernel_nopredict_h[1][1] || xdim2 != dims_PdV_kernel_nopredict_h[2][0] || ydim2 != dims_PdV_kernel_nopredict_h[2][1] || xdim3 != dims_PdV_kernel_nopredict_h[3][0] || ydim3 != dims_PdV_kernel_nopredict_h[3][1] || xdim4 != dims_PdV_kernel_nopredict_h[4][0] || ydim4 != dims_PdV_kernel_nopredict_h[4][1] || xdim5 != dims_PdV_kernel_nopredict_h[5][0] || ydim5 != dims_PdV_kernel_nopredict_h[5][1] || xdim6 != dims_PdV_kernel_nopredict_h[6][0] || ydim6 != dims_PdV_kernel_nopredict_h[6][1] || xdim7 != dims_PdV_kernel_nopredict_h[7][0] || ydim7 != dims_PdV_kernel_nopredict_h[7][1] || xdim8 != dims_PdV_kernel_nopredict_h[8][0] || ydim8 != dims_PdV_kernel_nopredict_h[8][1] || xdim9 != dims_PdV_kernel_nopredict_h[9][0] || ydim9 != dims_PdV_kernel_nopredict_h[9][1] || xdim10 != dims_PdV_kernel_nopredict_h[10][0] || ydim10 != dims_PdV_kernel_nopredict_h[10][1] || xdim11 != dims_PdV_kernel_nopredict_h[11][0] || ydim11 != dims_PdV_kernel_nopredict_h[11][1] || xdim12 != dims_PdV_kernel_nopredict_h[12][0] || ydim12 != dims_PdV_kernel_nopredict_h[12][1] || xdim13 != dims_PdV_kernel_nopredict_h[13][0] || ydim13 != dims_PdV_kernel_nopredict_h[13][1] || xdim14 != dims_PdV_kernel_nopredict_h[14][0] || ydim14 != dims_PdV_kernel_nopredict_h[14][1] || xdim15 != dims_PdV_kernel_nopredict_h[15][0] || ydim15 != dims_PdV_kernel_nopredict_h[15][1] || xdim16 != dims_PdV_kernel_nopredict_h[16][0] || ydim16 != dims_PdV_kernel_nopredict_h[16][1]) {
dims_PdV_kernel_nopredict_h[0][0] = xdim0;
dims_PdV_kernel_nopredict_h[0][1] = ydim0;
dims_PdV_kernel_nopredict_h[1][0] = xdim1;
dims_PdV_kernel_nopredict_h[1][1] = ydim1;
dims_PdV_kernel_nopredict_h[2][0] = xdim2;
dims_PdV_kernel_nopredict_h[2][1] = ydim2;
dims_PdV_kernel_nopredict_h[3][0] = xdim3;
dims_PdV_kernel_nopredict_h[3][1] = ydim3;
dims_PdV_kernel_nopredict_h[4][0] = xdim4;
dims_PdV_kernel_nopredict_h[4][1] = ydim4;
dims_PdV_kernel_nopredict_h[5][0] = xdim5;
dims_PdV_kernel_nopredict_h[5][1] = ydim5;
dims_PdV_kernel_nopredict_h[6][0] = xdim6;
dims_PdV_kernel_nopredict_h[6][1] = ydim6;
dims_PdV_kernel_nopredict_h[7][0] = xdim7;
dims_PdV_kernel_nopredict_h[7][1] = ydim7;
dims_PdV_kernel_nopredict_h[8][0] = xdim8;
dims_PdV_kernel_nopredict_h[8][1] = ydim8;
dims_PdV_kernel_nopredict_h[9][0] = xdim9;
dims_PdV_kernel_nopredict_h[9][1] = ydim9;
dims_PdV_kernel_nopredict_h[10][0] = xdim10;
dims_PdV_kernel_nopredict_h[10][1] = ydim10;
dims_PdV_kernel_nopredict_h[11][0] = xdim11;
dims_PdV_kernel_nopredict_h[11][1] = ydim11;
dims_PdV_kernel_nopredict_h[12][0] = xdim12;
dims_PdV_kernel_nopredict_h[12][1] = ydim12;
dims_PdV_kernel_nopredict_h[13][0] = xdim13;
dims_PdV_kernel_nopredict_h[13][1] = ydim13;
dims_PdV_kernel_nopredict_h[14][0] = xdim14;
dims_PdV_kernel_nopredict_h[14][1] = ydim14;
dims_PdV_kernel_nopredict_h[15][0] = xdim15;
dims_PdV_kernel_nopredict_h[15][1] = ydim15;
dims_PdV_kernel_nopredict_h[16][0] = xdim16;
dims_PdV_kernel_nopredict_h[16][1] = ydim16;
cutilSafeCall(cudaMemcpyToSymbol( dims_PdV_kernel_nopredict, dims_PdV_kernel_nopredict_h, sizeof(dims_PdV_kernel_nopredict)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
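//launch configuration: one thread per grid point, each dimension rounded up to cover the full range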
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
int dat11 = (OPS_soa ? args[11].dat->type_size : args[11].dat->elem_size);
int dat12 = (OPS_soa ? args[12].dat->type_size : args[12].dat->elem_size);
int dat13 = (OPS_soa ? args[13].dat->type_size : args[13].dat->elem_size);
int dat14 = (OPS_soa ? args[14].dat->type_size : args[14].dat->elem_size);
int dat15 = (OPS_soa ? args[15].dat->type_size : args[15].dat->elem_size);
int dat16 = (OPS_soa ? args[16].dat->type_size : args[16].dat->elem_size);
char *p_a[17];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
(start[1] * args[8].stencil->stride[1]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
(start[1] * args[9].stencil->stride[1]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
(start[1] * args[10].stencil->stride[1]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
int base11 = args[11].dat->base_offset +
dat11 * 1 * (start[0] * args[11].stencil->stride[0]);
base11 = base11+ dat11 *
args[11].dat->size[0] *
(start[1] * args[11].stencil->stride[1]);
base11 = base11+ dat11 *
args[11].dat->size[0] *
args[11].dat->size[1] *
(start[2] * args[11].stencil->stride[2]);
p_a[11] = (char *)args[11].data_d + base11;
int base12 = args[12].dat->base_offset +
dat12 * 1 * (start[0] * args[12].stencil->stride[0]);
base12 = base12+ dat12 *
args[12].dat->size[0] *
(start[1] * args[12].stencil->stride[1]);
base12 = base12+ dat12 *
args[12].dat->size[0] *
args[12].dat->size[1] *
(start[2] * args[12].stencil->stride[2]);
p_a[12] = (char *)args[12].data_d + base12;
int base13 = args[13].dat->base_offset +
dat13 * 1 * (start[0] * args[13].stencil->stride[0]);
base13 = base13+ dat13 *
args[13].dat->size[0] *
(start[1] * args[13].stencil->stride[1]);
base13 = base13+ dat13 *
args[13].dat->size[0] *
args[13].dat->size[1] *
(start[2] * args[13].stencil->stride[2]);
p_a[13] = (char *)args[13].data_d + base13;
int base14 = args[14].dat->base_offset +
dat14 * 1 * (start[0] * args[14].stencil->stride[0]);
base14 = base14+ dat14 *
args[14].dat->size[0] *
(start[1] * args[14].stencil->stride[1]);
base14 = base14+ dat14 *
args[14].dat->size[0] *
args[14].dat->size[1] *
(start[2] * args[14].stencil->stride[2]);
p_a[14] = (char *)args[14].data_d + base14;
int base15 = args[15].dat->base_offset +
dat15 * 1 * (start[0] * args[15].stencil->stride[0]);
base15 = base15+ dat15 *
args[15].dat->size[0] *
(start[1] * args[15].stencil->stride[1]);
base15 = base15+ dat15 *
args[15].dat->size[0] *
args[15].dat->size[1] *
(start[2] * args[15].stencil->stride[2]);
p_a[15] = (char *)args[15].data_d + base15;
int base16 = args[16].dat->base_offset +
dat16 * 1 * (start[0] * args[16].stencil->stride[0]);
base16 = base16+ dat16 *
args[16].dat->size[0] *
(start[1] * args[16].stencil->stride[1]);
base16 = base16+ dat16 *
args[16].dat->size[0] *
args[16].dat->size[1] *
(start[2] * args[16].stencil->stride[2]);
p_a[16] = (char *)args[16].data_d + base16;
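//make sure the device copies of the data are up to date and halo regions are exchanged before the launch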
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 17);
ops_halo_exchanges(args,17,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[102].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_PdV_kernel_nopredict<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9],
(double *)p_a[10], (double *)p_a[11],
(double *)p_a[12], (double *)p_a[13],
(double *)p_a[14], (double *)p_a[15],
(double *)p_a[16],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[102].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 17);
ops_set_halo_dirtybit3(&args[6],range);
ops_set_halo_dirtybit3(&args[10],range);
ops_set_halo_dirtybit3(&args[13],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[102].mpi_time += t2-t1;
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg10);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg11);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg12);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg13);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg14);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg15);
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg16);
}
}
#ifdef OPS_LAZY
void ops_par_loop_PdV_kernel_nopredict(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13, ops_arg arg14, ops_arg arg15, ops_arg arg16) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 102;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 102;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 17;
desc->args = (ops_arg*)malloc(17*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
desc->args[11] = arg11;
desc->hash = ((desc->hash << 5) + desc->hash) + arg11.dat->index;
desc->args[12] = arg12;
desc->hash = ((desc->hash << 5) + desc->hash) + arg12.dat->index;
desc->args[13] = arg13;
desc->hash = ((desc->hash << 5) + desc->hash) + arg13.dat->index;
desc->args[14] = arg14;
desc->hash = ((desc->hash << 5) + desc->hash) + arg14.dat->index;
desc->args[15] = arg15;
desc->hash = ((desc->hash << 5) + desc->hash) + arg15.dat->index;
desc->args[16] = arg16;
desc->hash = ((desc->hash << 5) + desc->hash) + arg16.dat->index;
desc->function = ops_par_loop_PdV_kernel_nopredict_execute;
if (OPS_diags > 1) {
ops_timing_realloc(102,"PdV_kernel_nopredict");
}
ops_enqueue_kernel(desc);
}
#endif
|
a7af18660b3f542f972570854cccb39888da6ee8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "config.h"
#include <cstddef>
#include "CudaMemory.cuh"
#include "cuda_macro.h"
#include <cstring>
#define CUDA_EVENT 0x1201
/*! \brief Move the memory into device
*
* \return true if the memory is correctly flushed
*
*/
bool CudaMemory::flush()
{
if (hm != NULL && dm != NULL)
{
//! copy from host to device memory
CUDA_SAFE_CALL(hipMemcpy(dm,hm,sz,hipMemcpyHostToDevice));
}
return true;
}
/*! \brief Allocate a chunk of memory
*
* Allocate a chunk of memory
*
* \param sz size of the chunk of memory to allocate in byte
*
*/
bool CudaMemory::allocate(size_t sz)
{
//! Allocate the device memory
if (dm == NULL)
{CUDA_SAFE_CALL(hipMalloc(&dm,sz));}
else
{
if (sz != this->sz)
{
std::cout << __FILE__ << ":" << __LINE__ << " error FATAL: using allocate to resize the memory, please use resize." << std::endl;
return false;
}
}
this->sz = sz;
#ifdef FILL_CUDA_MEMORY_WITH_MINUS_ONE
CUDA_SAFE_CALL(hipMemset(dm,-1,sz))
#endif
return true;
}
/*! \brief destroy a chunk of memory
*
* Destroy a chunk of memory
*
*/
void CudaMemory::destroy()
{
if (dm != NULL)
{
//! Release the allocated memory
CUDA_SAFE_CALL(hipFree(dm));
dm = NULL;
}
if (hm != NULL)
{
//! we invalidate hm
CUDA_SAFE_CALL(hipHostFree(hm));
#ifdef SE_CLASS2
//! remove hm
check_delete(hm);
#endif
hm = NULL;
}
sz = 0;
}
/*! \brief Allocate the host buffer
*
* Allocate the host buffer
*
*/
void CudaMemory::allocate_host(size_t sz) const
{
if (hm == NULL)
{
CUDA_SAFE_CALL(hipHostMalloc(&hm,sz,hipHostMallocMapped))
#ifdef SE_CLASS2
//! add hm to the list of allocated memory
check_new(hm,sz,CUDA_EVENT,0);
#endif
}
}
/*! \brief copy the data from a pointer
*
* copy the data from a pointer
*
* \param ptr
* \return true if success
*/
bool CudaMemory::copyFromPointer(const void * ptr)
{
// check if we have a host buffer, if not allocate it
allocate_host(sz);
// get the device pointer
void * dvp;
CUDA_SAFE_CALL(hipHostGetDevicePointer(&dvp,hm,0));
// memory copy
memcpy(dvp,ptr,sz);
return true;
}
/*! \brief copy from device to device
*
* copy a piece of memory from device to device
*
* \param CudaMemory from where to copy
*
 * \return true if success
*/
bool CudaMemory::copyDeviceToDevice(const CudaMemory & m)
{
//! The source buffer is too big to copy it
if (m.sz > sz)
{
std::cerr << "Error " << __LINE__ << __FILE__ << ": source buffer is too big to copy";
return false;
}
//! Copy the memory
CUDA_SAFE_CALL(hipMemcpy(dm,m.dm,m.sz,hipMemcpyDeviceToDevice));
return true;
}
/*! \brief copy from memory
*
* copy from memory
*
* \param m a memory interface
*
*/
bool CudaMemory::copy(const memory & m)
{
//! Here we try to cast memory into OpenFPMwdeviceCudaMemory
const CudaMemory * ofpm = dynamic_cast<const CudaMemory *>(&m);
//! if we fail we get the pointer and simply copy from the pointer
if (ofpm == NULL)
{
// copy the memory from device to host and from host to device
return copyFromPointer(m.getPointer());
}
else
{
// they are the same memory type, use cuda/thrust buffer copy
return copyDeviceToDevice(*ofpm);
}
}
/*! \brief Get the size of the allocated memory
*
* Get the size of the allocated memory
*
* \return the size of the allocated memory
*
*/
size_t CudaMemory::size() const
{
return sz;
}
/*! \brief Resize the allocated memory
*
 * Resize the allocated memory; if the requested size is smaller than the
 * already allocated memory, nothing is resized
*
* \param sz size
* \return true if the resize operation complete correctly
*
*/
bool CudaMemory::resize(size_t sz)
{
// if the allocated memory is enough, do not resize
if (sz <= CudaMemory::size())
return true;
//! Allocate the device memory if not done yet
if (CudaMemory::size() == 0)
{return allocate(sz);}
//! Create a new buffer, if sz is bigger than the actual size
void * thm = NULL;
//! Create a new buffer, if sz is bigger than the actual size
void * tdm = NULL;
if (dm != NULL)
{
if (this->sz < sz)
{
CUDA_SAFE_CALL(hipMalloc(&tdm,sz));
#ifdef FILL_CUDA_MEMORY_WITH_MINUS_ONE
CUDA_SAFE_CALL(hipMemset(tdm,-1,sz));
#endif
}
//! copy from the old buffer to the new one
CUDA_SAFE_CALL(hipMemcpy(tdm,dm,CudaMemory::size(),hipMemcpyDeviceToDevice));
}
if (hm != NULL)
{
if (this->sz < sz)
CUDA_SAFE_CALL(hipHostMalloc(&thm,sz,hipHostMallocMapped));
//! copy from the old buffer to the new one
CUDA_SAFE_CALL(hipMemcpy(thm,hm,CudaMemory::size(),hipMemcpyHostToHost));
}
//! free the old buffer
destroy();
dm = tdm;
hm = thm;
//! change to the new buffer
this->sz = sz;
return true;
}
/*! \brief Return a readable pointer with your data
*
* \return a readable pointer with your data
*
*/
void * CudaMemory::getPointer()
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
return hm;
}
/*! \brief Copy the device memory into the host memory
 *
 * Allocate the host buffer if needed and copy the device buffer back into it
 *
 */
void CudaMemory::deviceToHost()
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
//! copy from device to host memory
CUDA_SAFE_CALL(hipMemcpy(hm,dm,sz,hipMemcpyDeviceToHost));
}
/*! \brief It transfers to device memory from the host buffer of another memory object
*
* \param mem the other memory object
*
*/
void CudaMemory::hostToDevice(CudaMemory & mem)
{
// allocate an host memory if not allocated
if (mem.hm == NULL)
mem.allocate_host(sz);
if (mem.sz > sz)
{resize(mem.sz);}
	//! copy from host to device memory
CUDA_SAFE_CALL(hipMemcpy(dm,mem.hm,mem.sz,hipMemcpyHostToDevice));
}
void CudaMemory::hostToDevice(size_t start, size_t stop)
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
	//! copy the [start,stop) byte range from host to device memory
CUDA_SAFE_CALL(hipMemcpy(((unsigned char *)dm)+start,((unsigned char *)hm)+start,(stop-start),hipMemcpyHostToDevice));
}
/*! \brief Copy a range of bytes from the device memory into the host memory
 *
 * \param start first byte of the range to copy
 * \param stop one past the last byte of the range to copy
 *
 */
void CudaMemory::deviceToHost(size_t start, size_t stop)
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
//! copy from device to host memory
CUDA_SAFE_CALL(hipMemcpy(((unsigned char *)hm)+start,((unsigned char *)dm)+start,(stop-start),hipMemcpyDeviceToHost));
}
/*! \brief Return a readable pointer with your data
*
* \return a readable pointer with your data
*
*/
const void * CudaMemory::getPointer() const
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
return hm;
}
/*! \brief fill host and device memory with the selected byte
*
*
*/
void CudaMemory::fill(unsigned char c)
{
CUDA_SAFE_CALL(hipMemset(dm,c,size()));
if (hm != NULL)
{memset(hm,c,size());}
}
/*! \brief Return the CUDA device pointer
*
* \return CUDA device pointer
*
*/
void * CudaMemory::getDevicePointer()
{
return dm;
}
/*! \brief Copy the host memory into the device memory
 *
 * Allocate the host buffer if needed and copy it into the device buffer
 *
 */
void CudaMemory::hostToDevice()
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
	//! copy from host to device memory
CUDA_SAFE_CALL(hipMemcpy(dm,hm,sz,hipMemcpyHostToDevice));
}
/*! \brief Swap the memory
*
* \param mem memory to swap
*
*/
void CudaMemory::swap(CudaMemory & mem)
{
size_t sz_tmp;
void * dm_tmp;
long int ref_cnt_tmp;
bool is_hm_sync_tmp;
void * hm_tmp;
hm_tmp = hm;
is_hm_sync_tmp = is_hm_sync;
sz_tmp = sz;
dm_tmp = dm;
ref_cnt_tmp = ref_cnt;
hm = mem.hm;
is_hm_sync = mem.is_hm_sync;
sz = mem.sz;
dm = mem.dm;
ref_cnt = mem.ref_cnt;
mem.hm = hm_tmp;
mem.is_hm_sync = is_hm_sync_tmp;
mem.sz = sz_tmp;
mem.dm = dm_tmp;
mem.ref_cnt = ref_cnt_tmp;
}
|
a7af18660b3f542f972570854cccb39888da6ee8.cu
|
#include "config.h"
#include <cstddef>
#include "CudaMemory.cuh"
#include "cuda_macro.h"
#include <cstring>
#define CUDA_EVENT 0x1201
/*! \brief Move the memory into device
*
* \return true if the memory is correctly flushed
*
*/
bool CudaMemory::flush()
{
if (hm != NULL && dm != NULL)
{
//! copy from host to device memory
CUDA_SAFE_CALL(cudaMemcpy(dm,hm,sz,cudaMemcpyHostToDevice));
}
return true;
}
/*! \brief Allocate a chunk of memory
*
* Allocate a chunk of memory
*
* \param sz size of the chunk of memory to allocate in byte
*
*/
bool CudaMemory::allocate(size_t sz)
{
//! Allocate the device memory
if (dm == NULL)
{CUDA_SAFE_CALL(cudaMalloc(&dm,sz));}
else
{
if (sz != this->sz)
{
std::cout << __FILE__ << ":" << __LINE__ << " error FATAL: using allocate to resize the memory, please use resize." << std::endl;
return false;
}
}
this->sz = sz;
#ifdef FILL_CUDA_MEMORY_WITH_MINUS_ONE
CUDA_SAFE_CALL(cudaMemset(dm,-1,sz))
#endif
return true;
}
/*! \brief destroy a chunk of memory
*
* Destroy a chunk of memory
*
*/
void CudaMemory::destroy()
{
if (dm != NULL)
{
//! Release the allocated memory
CUDA_SAFE_CALL(cudaFree(dm));
dm = NULL;
}
if (hm != NULL)
{
//! we invalidate hm
CUDA_SAFE_CALL(cudaFreeHost(hm));
#ifdef SE_CLASS2
//! remove hm
check_delete(hm);
#endif
hm = NULL;
}
sz = 0;
}
/*! \brief Allocate the host buffer
*
* Allocate the host buffer
*
*/
void CudaMemory::allocate_host(size_t sz) const
{
if (hm == NULL)
{
CUDA_SAFE_CALL(cudaHostAlloc(&hm,sz,cudaHostAllocMapped))
#ifdef SE_CLASS2
//! add hm to the list of allocated memory
check_new(hm,sz,CUDA_EVENT,0);
#endif
}
}
/*! \brief copy the data from a pointer
*
* copy the data from a pointer
*
* \param ptr
* \return true if success
*/
bool CudaMemory::copyFromPointer(const void * ptr)
{
// check if we have a host buffer, if not allocate it
allocate_host(sz);
// get the device pointer
void * dvp;
CUDA_SAFE_CALL(cudaHostGetDevicePointer(&dvp,hm,0));
// memory copy
memcpy(dvp,ptr,sz);
return true;
}
/*! \brief copy from device to device
*
* copy a piece of memory from device to device
*
* \param CudaMemory from where to copy
*
 * \return true if success
*/
bool CudaMemory::copyDeviceToDevice(const CudaMemory & m)
{
//! The source buffer is too big to copy it
if (m.sz > sz)
{
std::cerr << "Error " << __LINE__ << __FILE__ << ": source buffer is too big to copy";
return false;
}
//! Copy the memory
CUDA_SAFE_CALL(cudaMemcpy(dm,m.dm,m.sz,cudaMemcpyDeviceToDevice));
return true;
}
/*! \brief copy from memory
*
* copy from memory
*
* \param m a memory interface
*
*/
bool CudaMemory::copy(const memory & m)
{
//! Here we try to cast memory into OpenFPMwdeviceCudaMemory
const CudaMemory * ofpm = dynamic_cast<const CudaMemory *>(&m);
//! if we fail we get the pointer and simply copy from the pointer
if (ofpm == NULL)
{
// copy the memory from device to host and from host to device
return copyFromPointer(m.getPointer());
}
else
{
// they are the same memory type, use cuda/thrust buffer copy
return copyDeviceToDevice(*ofpm);
}
}
/*! \brief Get the size of the allocated memory
*
* Get the size of the allocated memory
*
* \return the size of the allocated memory
*
*/
size_t CudaMemory::size() const
{
return sz;
}
/*! \brief Resize the allocated memory
*
 * Resize the allocated memory; if the requested size is smaller than the
 * already allocated memory, nothing is resized
*
* \param sz size
* \return true if the resize operation complete correctly
*
*/
bool CudaMemory::resize(size_t sz)
{
// if the allocated memory is enough, do not resize
if (sz <= CudaMemory::size())
return true;
//! Allocate the device memory if not done yet
if (CudaMemory::size() == 0)
{return allocate(sz);}
//! Create a new buffer, if sz is bigger than the actual size
void * thm = NULL;
//! Create a new buffer, if sz is bigger than the actual size
void * tdm = NULL;
if (dm != NULL)
{
if (this->sz < sz)
{
CUDA_SAFE_CALL(cudaMalloc(&tdm,sz));
#ifdef FILL_CUDA_MEMORY_WITH_MINUS_ONE
CUDA_SAFE_CALL(cudaMemset(tdm,-1,sz));
#endif
}
//! copy from the old buffer to the new one
CUDA_SAFE_CALL(cudaMemcpy(tdm,dm,CudaMemory::size(),cudaMemcpyDeviceToDevice));
}
if (hm != NULL)
{
if (this->sz < sz)
CUDA_SAFE_CALL(cudaHostAlloc(&thm,sz,cudaHostAllocMapped));
//! copy from the old buffer to the new one
CUDA_SAFE_CALL(cudaMemcpy(thm,hm,CudaMemory::size(),cudaMemcpyHostToHost));
}
//! free the old buffer
destroy();
dm = tdm;
hm = thm;
//! change to the new buffer
this->sz = sz;
return true;
}
/*! \brief Return a readable pointer with your data
*
* \return a readable pointer with your data
*
*/
void * CudaMemory::getPointer()
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
return hm;
}
/*! \brief Copy the device memory into the host memory
 *
 * Allocate the host buffer if needed and copy the device buffer back into it
 *
 */
void CudaMemory::deviceToHost()
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
//! copy from device to host memory
CUDA_SAFE_CALL(cudaMemcpy(hm,dm,sz,cudaMemcpyDeviceToHost));
}
/*! \brief It transfers to device memory from the host buffer of another memory object
*
* \param mem the other memory object
*
*/
void CudaMemory::hostToDevice(CudaMemory & mem)
{
// allocate an host memory if not allocated
if (mem.hm == NULL)
mem.allocate_host(sz);
if (mem.sz > sz)
{resize(mem.sz);}
	//! copy from host to device memory
CUDA_SAFE_CALL(cudaMemcpy(dm,mem.hm,mem.sz,cudaMemcpyHostToDevice));
}
void CudaMemory::hostToDevice(size_t start, size_t stop)
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
	//! copy the [start,stop) byte range from host to device memory
CUDA_SAFE_CALL(cudaMemcpy(((unsigned char *)dm)+start,((unsigned char *)hm)+start,(stop-start),cudaMemcpyHostToDevice));
}
/*! \brief Copy a range of bytes from the device memory into the host memory
 *
 * \param start first byte of the range to copy
 * \param stop one past the last byte of the range to copy
 *
 */
void CudaMemory::deviceToHost(size_t start, size_t stop)
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
//! copy from device to host memory
CUDA_SAFE_CALL(cudaMemcpy(((unsigned char *)hm)+start,((unsigned char *)dm)+start,(stop-start),cudaMemcpyDeviceToHost));
}
/*! \brief Return a readable pointer with your data
*
* \return a readable pointer with your data
*
*/
const void * CudaMemory::getPointer() const
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
return hm;
}
/*! \brief fill host and device memory with the selected byte
*
*
*/
void CudaMemory::fill(unsigned char c)
{
CUDA_SAFE_CALL(cudaMemset(dm,c,size()));
if (hm != NULL)
{memset(hm,c,size());}
}
/*! \brief Return the CUDA device pointer
*
* \return CUDA device pointer
*
*/
void * CudaMemory::getDevicePointer()
{
return dm;
}
/*! \brief Copy the host memory into the device memory
 *
 * Allocate the host buffer if needed and copy it into the device buffer
 *
 */
void CudaMemory::hostToDevice()
{
// allocate an host memory if not allocated
if (hm == NULL)
allocate_host(sz);
	//! copy from host to device memory
CUDA_SAFE_CALL(cudaMemcpy(dm,hm,sz,cudaMemcpyHostToDevice));
}
/*! \brief Swap the memory
*
* \param mem memory to swap
*
*/
void CudaMemory::swap(CudaMemory & mem)
{
size_t sz_tmp;
void * dm_tmp;
long int ref_cnt_tmp;
bool is_hm_sync_tmp;
void * hm_tmp;
hm_tmp = hm;
is_hm_sync_tmp = is_hm_sync;
sz_tmp = sz;
dm_tmp = dm;
ref_cnt_tmp = ref_cnt;
hm = mem.hm;
is_hm_sync = mem.is_hm_sync;
sz = mem.sz;
dm = mem.dm;
ref_cnt = mem.ref_cnt;
mem.hm = hm_tmp;
mem.is_hm_sync = is_hm_sync_tmp;
mem.sz = sz_tmp;
mem.dm = dm_tmp;
mem.ref_cnt = ref_cnt_tmp;
}
|
9ce9d0688186ff43f8c0a1db64f5756a0801e41e.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <hipblas.h>
#include <hip/hip_runtime.h>
#include "cublas_utils.h"
using data_type = double;
int main(int argc, char *argv[]) {
hipblasHandle_t cublasH = NULL;
hipStream_t stream = NULL;
/*
* A = | 1.0 2.0 3.0 4.0 |
* B = | 5.0 6.0 7.0 8.0 |
*/
const std::vector<data_type> A = {1.0, 2.0, 3.0, 4.0};
std::vector<data_type> B = {5.0, 6.0, 7.0, 8.0};
const data_type alpha = 2.1;
const int incx = 1;
const int incy = 1;
data_type *d_A = nullptr;
data_type *d_B = nullptr;
printf("A\n");
print_vector(A.size(), A.data());
printf("=====\n");
printf("B\n");
print_vector(B.size(), B.data());
printf("=====\n");
/* step 1: create cublas handle, bind a stream */
CUBLAS_CHECK(hipblasCreate(&cublasH));
CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
CUBLAS_CHECK(hipblasSetStream(cublasH, stream));
/* step 2: copy data to device */
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size()));
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_B), sizeof(data_type) * B.size()));
CUDA_CHECK(hipMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), hipMemcpyHostToDevice,
stream));
CUDA_CHECK(hipMemcpyAsync(d_B, B.data(), sizeof(data_type) * B.size(), hipMemcpyHostToDevice,
stream));
/* step 3: compute */
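    /* DAXPY computes B = alpha * A + B element-wise */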
CUBLAS_CHECK(hipblasDaxpy(cublasH, A.size(), &alpha, d_A, incx, d_B, incy));
/* step 4: copy data to host */
CUDA_CHECK(hipMemcpyAsync(B.data(), d_B, sizeof(data_type) * B.size(), hipMemcpyDeviceToHost,
stream));
CUDA_CHECK(hipStreamSynchronize(stream));
/*
* B = | 7.10 10.20 13.30 16.40 |
*/
printf("B\n");
print_vector(B.size(), B.data());
printf("=====\n");
/* free resources */
CUDA_CHECK(hipFree(d_A));
CUDA_CHECK(hipFree(d_B));
CUBLAS_CHECK(hipblasDestroy(cublasH));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
|
9ce9d0688186ff43f8c0a1db64f5756a0801e41e.cu
|
/*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include "cublas_utils.h"
using data_type = double;
int main(int argc, char *argv[]) {
cublasHandle_t cublasH = NULL;
cudaStream_t stream = NULL;
/*
* A = | 1.0 2.0 3.0 4.0 |
* B = | 5.0 6.0 7.0 8.0 |
*/
const std::vector<data_type> A = {1.0, 2.0, 3.0, 4.0};
std::vector<data_type> B = {5.0, 6.0, 7.0, 8.0};
const data_type alpha = 2.1;
const int incx = 1;
const int incy = 1;
data_type *d_A = nullptr;
data_type *d_B = nullptr;
printf("A\n");
print_vector(A.size(), A.data());
printf("=====\n");
printf("B\n");
print_vector(B.size(), B.data());
printf("=====\n");
/* step 1: create cublas handle, bind a stream */
CUBLAS_CHECK(cublasCreate(&cublasH));
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
CUBLAS_CHECK(cublasSetStream(cublasH, stream));
/* step 2: copy data to device */
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size()));
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_B), sizeof(data_type) * B.size()));
CUDA_CHECK(cudaMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), cudaMemcpyHostToDevice,
stream));
CUDA_CHECK(cudaMemcpyAsync(d_B, B.data(), sizeof(data_type) * B.size(), cudaMemcpyHostToDevice,
stream));
/* step 3: compute */
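    /* DAXPY computes B = alpha * A + B element-wise */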
CUBLAS_CHECK(cublasDaxpy(cublasH, A.size(), &alpha, d_A, incx, d_B, incy));
/* step 4: copy data to host */
CUDA_CHECK(cudaMemcpyAsync(B.data(), d_B, sizeof(data_type) * B.size(), cudaMemcpyDeviceToHost,
stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
/*
* B = | 7.10 10.20 13.30 16.40 |
*/
printf("B\n");
print_vector(B.size(), B.data());
printf("=====\n");
/* free resources */
CUDA_CHECK(cudaFree(d_A));
CUDA_CHECK(cudaFree(d_B));
CUBLAS_CHECK(cublasDestroy(cublasH));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
c920829cfa6223581a700b894c08d59b76beed91.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "bboxUtils.h"
#include "hipcub/hipcub.hpp"
#include "cub_helper.h"
#include "kernel.h"
template <typename T_SCORE>
pluginStatus_t sortScoresPerImage_gpu(
hipStream_t stream, const int num_images, const int num_items_per_image,
void *unsorted_scores, void *unsorted_bbox_indices, void *sorted_scores,
void *sorted_bbox_indices, void *workspace) {
void *d_offsets = workspace;
void *cubWorkspace =
nextWorkspacePtr((int8_t *)d_offsets, (num_images + 1) * sizeof(int));
setUniformOffsets(stream, num_images, num_items_per_image, (int *)d_offsets);
const int arrayLen = num_images * num_items_per_image;
size_t temp_storage_bytes =
cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_images);
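  // descending segmented radix sort: each image is one segment delimited by d_offsets,
  // the scores are the sort keys and the bbox indices are permuted along with them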
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
cubWorkspace, temp_storage_bytes, (const T_SCORE *)(unsorted_scores),
(T_SCORE *)(sorted_scores), (const int *)(unsorted_bbox_indices),
(int *)(sorted_bbox_indices), arrayLen, num_images,
(const int *)d_offsets, (const int *)d_offsets + 1, 0,
sizeof(T_SCORE) * 8, stream);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// sortScoresPerImage LAUNCH CONFIG
typedef pluginStatus_t (*sspiFunc)(hipStream_t, const int, const int, void *,
void *, void *, void *, void *);
struct sspiLaunchConfig {
DataType t_score;
sspiFunc function;
sspiLaunchConfig(DataType t_score) : t_score(t_score) {}
sspiLaunchConfig(DataType t_score, sspiFunc function)
: t_score(t_score), function(function) {}
bool operator==(const sspiLaunchConfig &other) {
return t_score == other.t_score;
}
};
static std::vector<sspiLaunchConfig> sspiFuncVec;
bool sspiInit() {
sspiFuncVec.push_back(
sspiLaunchConfig(DataType::kFLOAT, sortScoresPerImage_gpu<float>));
return true;
}
static bool initialized = sspiInit();
pluginStatus_t sortScoresPerImage(
hipStream_t stream, const int num_images, const int num_items_per_image,
const DataType DT_SCORE, void *unsorted_scores, void *unsorted_bbox_indices,
void *sorted_scores, void *sorted_bbox_indices, void *workspace) {
sspiLaunchConfig lc = sspiLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < sspiFuncVec.size(); ++i) {
if (lc == sspiFuncVec[i]) {
DEBUG_PRINTF("sortScoresPerImage kernel %d\n", i);
return sspiFuncVec[i].function(
stream, num_images, num_items_per_image, unsorted_scores,
unsorted_bbox_indices, sorted_scores, sorted_bbox_indices, workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t sortScoresPerImageWorkspaceSize(const int num_images,
const int num_items_per_image,
const DataType DT_SCORE) {
const int arrayLen = num_images * num_items_per_image;
size_t wss[2];
wss[0] = (num_images + 1) * sizeof(int); // offsets
if (DT_SCORE == DataType::kFLOAT) {
wss[1] =
cubSortPairsWorkspaceSize<float, int>(arrayLen,
num_images); // cub workspace
} else {
printf("SCORE type not supported.\n");
return (size_t)-1;
}
return calculateTotalWorkspaceSize(wss, 2);
}
|
c920829cfa6223581a700b894c08d59b76beed91.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "bboxUtils.h"
#include "cub/cub.cuh"
#include "cub_helper.h"
#include "kernel.h"
template <typename T_SCORE>
pluginStatus_t sortScoresPerImage_gpu(
cudaStream_t stream, const int num_images, const int num_items_per_image,
void *unsorted_scores, void *unsorted_bbox_indices, void *sorted_scores,
void *sorted_bbox_indices, void *workspace) {
void *d_offsets = workspace;
void *cubWorkspace =
nextWorkspacePtr((int8_t *)d_offsets, (num_images + 1) * sizeof(int));
setUniformOffsets(stream, num_images, num_items_per_image, (int *)d_offsets);
const int arrayLen = num_images * num_items_per_image;
size_t temp_storage_bytes =
cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_images);
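  // descending segmented radix sort: each image is one segment delimited by d_offsets,
  // the scores are the sort keys and the bbox indices are permuted along with them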
cub::DeviceSegmentedRadixSort::SortPairsDescending(
cubWorkspace, temp_storage_bytes, (const T_SCORE *)(unsorted_scores),
(T_SCORE *)(sorted_scores), (const int *)(unsorted_bbox_indices),
(int *)(sorted_bbox_indices), arrayLen, num_images,
(const int *)d_offsets, (const int *)d_offsets + 1, 0,
sizeof(T_SCORE) * 8, stream);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// sortScoresPerImage LAUNCH CONFIG
typedef pluginStatus_t (*sspiFunc)(cudaStream_t, const int, const int, void *,
void *, void *, void *, void *);
struct sspiLaunchConfig {
DataType t_score;
sspiFunc function;
sspiLaunchConfig(DataType t_score) : t_score(t_score) {}
sspiLaunchConfig(DataType t_score, sspiFunc function)
: t_score(t_score), function(function) {}
bool operator==(const sspiLaunchConfig &other) {
return t_score == other.t_score;
}
};
static std::vector<sspiLaunchConfig> sspiFuncVec;
bool sspiInit() {
sspiFuncVec.push_back(
sspiLaunchConfig(DataType::kFLOAT, sortScoresPerImage_gpu<float>));
return true;
}
static bool initialized = sspiInit();
pluginStatus_t sortScoresPerImage(
cudaStream_t stream, const int num_images, const int num_items_per_image,
const DataType DT_SCORE, void *unsorted_scores, void *unsorted_bbox_indices,
void *sorted_scores, void *sorted_bbox_indices, void *workspace) {
sspiLaunchConfig lc = sspiLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < sspiFuncVec.size(); ++i) {
if (lc == sspiFuncVec[i]) {
DEBUG_PRINTF("sortScoresPerImage kernel %d\n", i);
return sspiFuncVec[i].function(
stream, num_images, num_items_per_image, unsorted_scores,
unsorted_bbox_indices, sorted_scores, sorted_bbox_indices, workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t sortScoresPerImageWorkspaceSize(const int num_images,
const int num_items_per_image,
const DataType DT_SCORE) {
const int arrayLen = num_images * num_items_per_image;
size_t wss[2];
wss[0] = (num_images + 1) * sizeof(int); // offsets
if (DT_SCORE == DataType::kFLOAT) {
wss[1] =
cubSortPairsWorkspaceSize<float, int>(arrayLen,
num_images); // cub workspace
} else {
printf("SCORE type not supported.\n");
return (size_t)-1;
}
return calculateTotalWorkspaceSize(wss, 2);
}
|
7a87eeb37fb1c529a4b0dc33726662c28f62dda1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
*/
#include "Calculations.h"
__host__ __device__ double distance2(real3 i, real3 j)
{
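	// Euclidean distance between i and j (despite the name, this returns the distance itself, not its square)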
return sqrt((i.x-j.x)*(i.x-j.x) + (i.y-j.y)*(i.y-j.y) + (i.z-j.z)*(i.z-j.z));
}
//-------------------------- calculate Force Si ------------------------------//
__global__ void
__launch_bounds__(1024, 4)
d_calculateForce_Si(int MAX_SI_NEIGHBORS, int MAX_XE_NEIGHBORS, particleStruct* siParticles, particleStruct* siParticles2, particleStruct* xeParticles, int numOfSi, int numOfXe, bool USE_NEIGHBOR_LISTS, bool useLennardJonesPotentialForSi)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < numOfSi)
{
real3 iPosition;
real3 jPosition;
real3 kPosition;
double r_ij = 0.0;
double r_ik = 0.0;
double r_jk = 0.0;
iPosition = siParticles[idx].position;
siParticles[idx].force.x = 0.0;
siParticles[idx].force.y = 0.0;
siParticles[idx].force.z = 0.0;
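		// accumulate two-body (v2) and three-body (v3) force contributions from every other Si particle inside the cutoff a_Si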
for(int j = 0; j < numOfSi; j++)
{
if(j != idx)
{
jPosition = siParticles2[j].position;
r_ij = distance2(iPosition, jPosition);
if(r_ij/sigma_Si < a_Si)
{
siParticles[idx].force.x -= v2_derivative_of_rix(iPosition, jPosition, r_ij);
siParticles[idx].force.y -= v2_derivative_of_riy(iPosition, jPosition, r_ij);
siParticles[idx].force.z -= v2_derivative_of_riz(iPosition, jPosition, r_ij);
}
for(int k = 0; k < numOfSi; k++)
{
if(k != idx && k != j)
{
					kPosition = siParticles2[k].position;
r_ik = distance2(iPosition, kPosition);
r_jk = distance2(jPosition, kPosition);
if((r_ij/sigma_Si < a_Si && r_ik/sigma_Si < a_Si) || (r_ij/sigma_Si < a_Si && r_jk/sigma_Si < a_Si) || (r_ik/sigma_Si < a_Si && r_jk/sigma_Si < a_Si))
{
siParticles[idx].force.x -= v3_derivative_of_rix(iPosition, jPosition, kPosition, r_ij, r_ik, r_jk);
siParticles[idx].force.x -= v3_derivative_of_rix(iPosition, kPosition, jPosition, r_ik, r_ij, r_jk);
siParticles[idx].force.y -= v3_derivative_of_riy(iPosition, jPosition, kPosition, r_ij, r_ik, r_jk);
siParticles[idx].force.y -= v3_derivative_of_riy(iPosition, kPosition, jPosition, r_ik, r_ij, r_jk);
siParticles[idx].force.z -= v3_derivative_of_riz(iPosition, jPosition, kPosition, r_ij, r_ik, r_jk);
siParticles[idx].force.z -= v3_derivative_of_riz(iPosition, kPosition, jPosition, r_ik, r_ij, r_jk);
}
}
}
}
}
}
/* for(int j = 0; j < countXe; j++)
{
jPosition = xeParticles[j].position;
jPosition.x += 0.25*config->SI_LENGTH*space_Si;
jPosition.y += 0.25*config->SI_LENGTH*space_Si;
jPosition.z += config->SI_HEIGHT+config->LA_SPACE;
r_ij = distance2(iPosition, jPosition);
if(r_ij/sigma_Si_Xe < a_Si_Xe)
{
siParticles[i].force.x += ((iPosition.x-jPosition.x)*(lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe)));//-lennardJonesForce(2.5*sigma_Si,sigma_Si_Xe,epsilon_Si_Xe)));
siParticles[i].force.y += ((iPosition.y-jPosition.y)*(lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe)));//-lennardJonesForce(2.5*sigma_Si,sigma_Si_Xe,epsilon_Si_Xe)));
siParticles[i].force.z += ((iPosition.z-jPosition.z)*(lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe)));//-lennardJonesForce(2.5*sigma_Si,sigma_Si_Xe,epsilon_Si_Xe)));
}
}*/
// }
}
//----------------------------------------------------------------------------//
__global__ void
__launch_bounds__(1024, 4)
d_calculateForce_Xe(int MAX_SI_NEIGHBORS, int MAX_XE_NEIGHBORS, particleStruct* xeParticles, particleStruct* xeParticles2, particleStruct* siParticles, int numOfSi, int numOfXe, bool USE_NEIGHBOR_LISTS)
{
// extern __shared__ particleStruct* sharedXe[];
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < numOfXe)
{
real3 iPosition;
real3 jPosition;
double r_ij = 0.0;
iPosition = xeParticles[idx].position;
xeParticles[idx].force.x = 0.0;
xeParticles[idx].force.y = 0.0;
xeParticles[idx].force.z = 0.0;
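		// sum pairwise Lennard-Jones forces over the Xe neighbour list (the list is terminated by -1)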
for(int j = 0; j < MAX_XE_NEIGHBORS && xeParticles[idx].xeNeighbors[j] != -1; j++)
{
if(j != idx)
{
jPosition = xeParticles[xeParticles[idx].xeNeighbors[j]].position;
r_ij = distance2(iPosition, jPosition);
if(r_ij/sigma_Xe_Xe < xe_Cluster)
{
xeParticles[idx].force.x += (iPosition.x-jPosition.x)*lennardJonesForce(r_ij,sigma_Xe_Xe,epsilon_Xe_Xe);
xeParticles[idx].force.y += (iPosition.y-jPosition.y)*lennardJonesForce(r_ij,sigma_Xe_Xe,epsilon_Xe_Xe);
xeParticles[idx].force.z += (iPosition.z-jPosition.z)*lennardJonesForce(r_ij,sigma_Xe_Xe,epsilon_Xe_Xe);
}
}
}
}
/* for(int j = 0; j < countSi; j++)
{
jPosition = siParticles[j].position;
r_ij = distance2(iPosition, jPosition);
if(r_ij/sigma_Si_Xe < a_Si_Xe)
{
xeParticles[i].force.x += (iPosition.x-jPosition.x)*lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe);
xeParticles[i].force.y += (iPosition.y-jPosition.y)*lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe);
xeParticles[i].force.z += (iPosition.z-jPosition.z)*lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe);
}
}*/
// }
}
//////////////////////////////////////////////////////////////////////////////////////////////!!!!
//----------------------calculate total potential of Si -----------------------//
/*__global__ double d_V_total_Si(int SI_PARTICLES, particleStruct* siParticles)
{
}
//----------------------------------------------------------------------------//
__global__ double d_V_total_Si_Xe(int SI_PARTICLES, particleStruct* siParticles, int XE_PARTICLES, particleStruct* xeParticles)
{
}
__global__ double d_V_total_Xe(int XE_PARTICLES, particleStruct* xeParticles)
{
}
__global__ void d_V_total(int SI_PARTICLES, particleStruct* siParticles, int XE_PARTICLES, particleStruct* xeParticles)
{
}*/
//////////////////////////////////////////////////////////////////////////////////////////////!!!!
__global__ void d_initiateAcceleration(particleStruct *particles, int listSize, double mass)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < listSize)
{
particles[idx].aAcc.x = particles[idx].force.x/mass;
particles[idx].aAcc.y = particles[idx].force.y/mass;
particles[idx].aAcc.z = particles[idx].force.z/mass;
}
}
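// predictor step of the Gear predictor-corrector integrator: advance positions, velocities and the stored higher-order acceleration terms with a Taylor expansion over dt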
__global__ void d_predict(particleStruct *particles, int listSize, float dt)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double c1;
__shared__ double c2;
__shared__ double c3;
__shared__ double c4;
c1 = dt;
c2 = c1*dt/2.0;
c3 = c2*dt/3.0;
c4 = c3*dt/4.0;
if(idx < listSize)
{
particles[idx].position.x += c1*particles[idx].velocity.x + c2*particles[idx].aAcc.x + c3*particles[idx].bAcc.x + c4*particles[idx].cAcc.x;
particles[idx].position.y += c1*particles[idx].velocity.y + c2*particles[idx].aAcc.y + c3*particles[idx].bAcc.y + c4*particles[idx].cAcc.y;
particles[idx].position.z += c1*particles[idx].velocity.z + c2*particles[idx].aAcc.z + c3*particles[idx].bAcc.z + c4*particles[idx].cAcc.z;
particles[idx].velocity.x += c1*particles[idx].aAcc.x + c2*particles[idx].bAcc.x + c3*particles[idx].cAcc.x;
particles[idx].velocity.y += c1*particles[idx].aAcc.y + c2*particles[idx].bAcc.y + c3*particles[idx].cAcc.y;
particles[idx].velocity.z += c1*particles[idx].aAcc.z + c2*particles[idx].bAcc.z + c3*particles[idx].cAcc.z;
particles[idx].aAcc.x += c1*particles[idx].bAcc.x + c2*particles[idx].cAcc.x;
particles[idx].aAcc.y += c1*particles[idx].bAcc.y + c2*particles[idx].cAcc.y;
particles[idx].aAcc.z += c1*particles[idx].bAcc.z + c2*particles[idx].cAcc.z;
particles[idx].bAcc.x += c1*particles[idx].cAcc.x;
particles[idx].bAcc.y += c1*particles[idx].cAcc.y;
particles[idx].bAcc.z += c1*particles[idx].cAcc.z;
}
}
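// Corrector step: the acceleration implied by the newly evaluated forces is
// compared with the predicted one, and the error is distributed over position,
// velocity and the higher derivatives using the GEAR1..GEAR4 weights.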
__global__ void d_correct(particleStruct *particles, double dt, int listSize, double mass)
{
__shared__ double c1;
__shared__ double c2;
__shared__ double c3;
__shared__ double c4;
__shared__ double cr;
__shared__ double cv;
__shared__ double cb;
__shared__ double cc;
c1 = dt ;
c2 = c1*dt/2.0;
c3 = c2*dt/3.0;
c4 = c3*dt/4.0;
cr = GEAR1*c2;
cv = GEAR2*c2/c1;
cb = GEAR3*c2/c3;
cc = GEAR4*c2/c4;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < listSize)
{
double axi = particles[idx].force.x/mass;
double ayi = particles[idx].force.y/mass;
double azi = particles[idx].force.z/mass;
double corrx = axi - particles[idx].aAcc.x;
double corry = ayi - particles[idx].aAcc.y;
double corrz = azi - particles[idx].aAcc.z;
particles[idx].position.x += cr*corrx;
particles[idx].position.y += cr*corry;
particles[idx].position.z += cr*corrz;
particles[idx].velocity.x += cv*corrx;
particles[idx].velocity.y += cv*corry;
particles[idx].velocity.z += cv*corrz;
particles[idx].aAcc.x = axi;
particles[idx].aAcc.y = ayi;
particles[idx].aAcc.z = azi;
particles[idx].bAcc.x += cb*corrx;
particles[idx].bAcc.y += cb*corry;
particles[idx].bAcc.z += cb*corrz;
particles[idx].cAcc.x += cc*corrx;
particles[idx].cAcc.y += cc*corry;
particles[idx].cAcc.z += cc*corrz;
}
}
//------------------------Lennard Jones Potential -----------------------------//
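// Returns (1/r)*(-dV/dr) for the 12-6 Lennard-Jones potential, i.e.
// 24*eps/sigma^2 * (2*(sigma/r)^14 - (sigma/r)^8); callers multiply by the
// coordinate difference (x_i - x_j) to obtain each Cartesian force component.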
__host__ __device__ double lennardJonesForce(double dist, double sig, double eps)
{
double sigsq = sig*sig;
double con = 24.0*eps/sigsq;
double dist2 = dist * dist;
dist2 /= sigsq;
double dist4 = dist2*dist2;
double dist8 = dist4*dist4;
double dist14 = dist2*dist4*dist8;
double invdist8= 1.0/dist8;
double invdist14= 1.0/dist14;
double s = 2.0f*invdist14-invdist8;
return s * con;
}
//----------------------------------------------------------------------------//
__host__ __device__ double lennardJonesPotential(double dist, double sig, double eps)
{
double expr = sig/dist;
double expr2 = expr*expr;
double expr4 = expr2*expr2;
double expr6 = expr4*expr2;
double expr12 = expr6*expr6;
return 4.0*eps*(expr12-expr6);
}
//-------------------- force between two Si particles ---------------------//
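// Derivative of the Stillinger-Weber-style two-body term
// f2(r') = A*(B*r'^-4 - 1)*exp(1/(r'-a)) with respect to the reduced distance
// r' = r/sigma; the v2_derivative_of_ri{x,y,z} helpers below scale it by
// epsilon/sigma and project it onto each coordinate.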
__host__ __device__ double f2_derivative_of_rij_tag(double r_ij_tag)
{
double r_ij_tag_minus_a = r_ij_tag - a_Si;//r'ij-a
double r_ij_tag_minus_a2 = r_ij_tag_minus_a*r_ij_tag_minus_a;
double r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a);//(r'ij-a)^(-1)
double r_ij_tag_minus_a_in_mTwo = (1.0/(r_ij_tag_minus_a2));//(r'ij-a)^(-2)
double r_ij_tag2 = r_ij_tag*r_ij_tag;
double r_ij_tag4 = r_ij_tag2*r_ij_tag2;
double r_ij_tag5 = r_ij_tag4*r_ij_tag;
double r_ij_tag_in_mFive = (1.0/(r_ij_tag5));//r'ij^(-5)
double r_ij_tag_in_mFour = (1.0/(r_ij_tag4));//r'ij^(-4)
double expression = B_Si * r_ij_tag_in_mFour;
expression = expression - 1.0;//(B*r'ij^(-4) - 1)
double exponent = exp(r_ij_tag_minus_a_in_mOne);
double f2_derivative_part_1 = -4.0 * B_Si * r_ij_tag_in_mFive;
double f2_derivative_part_2 = expression * r_ij_tag_minus_a_in_mTwo;
return A_Si*exponent*(f2_derivative_part_1 - f2_derivative_part_2);
}
__host__ __device__ double v2_derivative_of_rix(real3 i, real3 j, double r_ij)
{
if(r_ij/sigma_Si == pow(2.0,1.0/6.0))
{
return 0;
}
double f2_derivative = f2_derivative_of_rij_tag(r_ij/sigma_Si);
f2_derivative = f2_derivative * (epsilon_Si/sigma_Si);//v2 derivative of distance
double dist_x = (i.x - j.x);
dist_x = dist_x / (r_ij);
double v2_derivative = f2_derivative * dist_x;
return v2_derivative;
}
__host__ __device__ double v2_derivative_of_riy(real3 i, real3 j, double r_ij)
{
if(r_ij/sigma_Si == pow(2.0,1.0/6.0))
{
return 0;
}
double f2_derivative = f2_derivative_of_rij_tag(r_ij/sigma_Si);
f2_derivative = f2_derivative * (epsilon_Si/sigma_Si);
double dist_y = i.y - j.y;
dist_y = dist_y / (r_ij);
double v2_derivative = f2_derivative * dist_y;
return v2_derivative;
}
__host__ __device__ double v2_derivative_of_riz(real3 i, real3 j, double r_ij)
{
if(r_ij/sigma_Si == pow(2.0,1.0/6.0))
{
return 0;
}
double f2_derivative = f2_derivative_of_rij_tag(r_ij/sigma_Si);
f2_derivative = f2_derivative * (epsilon_Si/sigma_Si);
double dist_z = i.z - j.z;
dist_z = dist_z / (r_ij);
double v2_derivative = f2_derivative * dist_z;
return v2_derivative;
}
//----------------------------------------------------------------------------//
//-------------------- potential between two Si particles ---------------------//
__host__ __device__ double f2(double r_ij_tag)
{
if(r_ij_tag >= a_Si)
{
return 0;
}
double r_ij_tag_minus_a_in_mOne = r_ij_tag - a_Si;
r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a_in_mOne);
double exponent = exp(r_ij_tag_minus_a_in_mOne);
double r_ij_tag2 = r_ij_tag*r_ij_tag;
double r_ij_tag4 = r_ij_tag2*r_ij_tag2;
double expression = (1.0/(r_ij_tag4));
expression *= B_Si;
expression -= 1.0;
return A_Si*expression*exponent;
}
__host__ __device__ double v2(double r_ij_tag)
{
if(r_ij_tag == pow(2.0,1.0/6.0))
{
return -epsilon_Si;
}
return f2(r_ij_tag)*epsilon_Si;
}
//----------------------------------------------------------------------------//
//------------------------ force between three Si particles -------------------//
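// Partial derivatives of the three-body terms h_i, h_j, h_k with respect to
// the reduced pair distances. f3_derivative_of_r{ij,ik}_tag sum the pieces
// that survive the a_Si cutoff, and v3_derivative_of_ri{x,y,z} combine them
// into Cartesian force components on particle i via the chain rule.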
__host__ __device__ double hi_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosJik_plus_oneThird = ((r_ij_tag*r_ij_tag + r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag)/(2.0 * r_ij_tag * r_ik_tag)) + (1.0/3.0);
double r_ij_tag_minus_a = r_ij_tag - a_Si;
double r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a) * gama_Si;
double r_ij_tag_minus_a_in_mTwo = (1.0/(r_ij_tag_minus_a*r_ij_tag_minus_a)) * gama_Si;
double r_ik_tag_minus_a_in_mOne = (1.0/(r_ik_tag - a_Si)) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_ik_tag_minus_a_in_mOne);
double expression = (r_ij_tag*r_ij_tag - r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag) / (r_ij_tag*r_ij_tag * r_ik_tag);
expression -= (r_ij_tag_minus_a_in_mTwo*cosJik_plus_oneThird);
return lamda_Si*exponent*cosJik_plus_oneThird*expression;
}
__host__ __device__ double hi_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosJik_plus_oneThird = ((r_ij_tag*r_ij_tag + r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag)/(2.0 * r_ij_tag * r_ik_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = (1.0/(r_ij_tag - a_Si)) * gama_Si;
double r_ik_tag_minus_a = r_ik_tag - a_Si;
double r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a) * gama_Si;
double r_ik_tag_minus_a_in_mTwo = (1.0/(r_ik_tag_minus_a*r_ik_tag_minus_a)) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_ik_tag_minus_a_in_mOne);
double expression = (r_ik_tag*r_ik_tag - r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag) / (r_ik_tag*r_ik_tag * r_ij_tag);
expression -= (r_ik_tag_minus_a_in_mTwo*cosJik_plus_oneThird);
return lamda_Si*exponent*cosJik_plus_oneThird*expression;
}
__host__ __device__ double hj_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIjk_plus_oneThird = ((r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag - r_ik_tag*r_ik_tag)/(2.0 * r_ij_tag * r_jk_tag)) + (1.0/3.0);
double r_ij_tag_minus_a = r_ij_tag - a_Si;
double r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a) * gama_Si;
double r_ij_tag_minus_a_in_mTwo = (1.0/(r_ij_tag_minus_a*r_ij_tag_minus_a)) * gama_Si;
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (r_ij_tag*r_ij_tag - r_jk_tag*r_jk_tag + r_ik_tag*r_ik_tag) / (r_ij_tag*r_ij_tag * r_jk_tag);
expression -= (r_ij_tag_minus_a_in_mTwo*cosIjk_plus_oneThird);
return lamda_Si*exponent*cosIjk_plus_oneThird*expression;
}
__host__ __device__ double hj_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIjk_plus_oneThird = ((r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag - r_ik_tag*r_ik_tag)/(2.0 * r_ij_tag * r_jk_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = (1.0/(r_ij_tag - a_Si)) * gama_Si;
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (-r_ik_tag) / (r_ij_tag * r_jk_tag);
return lamda_Si*exponent*2.0*cosIjk_plus_oneThird*expression;
}
__host__ __device__ double hk_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIkj_plus_oneThird = ((r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag - r_ij_tag*r_ij_tag)/(2.0 * r_ik_tag * r_jk_tag)) + (1.0/3.0);
double r_ik_tag_minus_a_in_mOne = (1.0/(r_ik_tag - a_Si)) * gama_Si;
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double exponent = exp(r_ik_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (-r_ij_tag) / (r_ik_tag * r_jk_tag);
return lamda_Si*exponent*2.0*cosIkj_plus_oneThird*expression;
}
__host__ __device__ double hk_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIkj_plus_oneThird = ((r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag - r_ij_tag*r_ij_tag)/(2.0 * r_ik_tag * r_jk_tag)) + (1.0/3.0);
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double r_ik_tag_minus_a = r_ik_tag - a_Si;
double r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a) * gama_Si;
double r_ik_tag_minus_a_in_mTwo = (1.0/(r_ik_tag_minus_a*r_ik_tag_minus_a)) * gama_Si;
double exponent = exp(r_ik_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag + r_ij_tag*r_ij_tag) / (r_ik_tag*r_ik_tag * r_jk_tag);
expression -= (r_ik_tag_minus_a_in_mTwo*cosIkj_plus_oneThird);
return lamda_Si*exponent*cosIkj_plus_oneThird*expression;
}
__host__ __device__ double f3_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double hi_derivative_of_rij = 0.0;
double hj_derivative_of_rij = 0.0;
double hk_derivative_of_rij = 0.0;
if(r_ij_tag < a_Si && r_ik_tag < a_Si)
{
hi_derivative_of_rij = hi_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ij_tag < a_Si)
{
hj_derivative_of_rij = hj_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ik_tag < a_Si)
{
hk_derivative_of_rij = hk_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
return hi_derivative_of_rij + hj_derivative_of_rij + hk_derivative_of_rij;
}
__host__ __device__ double f3_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double hi_derivative_of_rik = 0.0;
double hj_derivative_of_rik = 0.0;
double hk_derivative_of_rik = 0.0;
if(r_ik_tag < a_Si && r_ij_tag < a_Si)
{
hi_derivative_of_rik = hi_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ij_tag < a_Si)
{
hj_derivative_of_rik = hj_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ik_tag < a_Si)
{
hk_derivative_of_rik = hk_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
return hi_derivative_of_rik + hj_derivative_of_rik + hk_derivative_of_rik;
}
__host__ __device__ double v3_derivative_of_rix(real3 i, real3 j, real3 k, double r_ij, double r_ik, double r_jk)
{
double v3_derived_by_rij = (f3_derivative_of_rij_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double v3_derived_by_rik = (f3_derivative_of_rik_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double dist_ijx = (i.x-j.x);
double dist_ikx = (i.x-k.x);
double expression1 = (dist_ijx/(r_ij));
double expression2 = (dist_ikx/(r_ik));
return v3_derived_by_rij*expression1 + v3_derived_by_rik*expression2;
}
__host__ __device__ double v3_derivative_of_riy(real3 i, real3 j, real3 k, double r_ij, double r_ik, double r_jk)
{
double v3_derived_by_rij = (f3_derivative_of_rij_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double v3_derived_by_rik = (f3_derivative_of_rik_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double dist_ijy = (i.y-j.y);
double dist_iky = (i.y-k.y);
double expression1 = (dist_ijy/(r_ij));
double expression2 = (dist_iky/(r_ik));
return v3_derived_by_rij*expression1 + v3_derived_by_rik*expression2;
}
__host__ __device__ double v3_derivative_of_riz(real3 i, real3 j, real3 k, double r_ij, double r_ik, double r_jk)
{
double v3_derived_by_rij = (f3_derivative_of_rij_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double v3_derived_by_rik = (f3_derivative_of_rik_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double dist_ijz = (i.z-j.z);
double dist_ikz = (i.z-k.z);
double expression1 = (dist_ijz/(r_ij));
double expression2 = (dist_ikz/(r_ik));
return v3_derived_by_rij*expression1 + v3_derived_by_rik*expression2;
}
//----------------------------------------------------------------------------//
//-------------------- potential between three Si particles -------------------//
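// Three-body terms of the reduced potential: each h is
// lamda * exp(gama/(r1 - a) + gama/(r2 - a)) * (cos(theta) + 1/3)^2,
// where theta is the bond angle at the central atom (i, j, or k respectively).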
__host__ __device__ double hi(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosJik_plus_oneThird = ((r_ij_tag*r_ij_tag + r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag)/(2.0 * r_ij_tag * r_ik_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = r_ij_tag - a_Si;
r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a_in_mOne);
r_ij_tag_minus_a_in_mOne *= gama_Si;
double r_ik_tag_minus_a_in_mOne = r_ik_tag - a_Si;
r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a_in_mOne);
r_ik_tag_minus_a_in_mOne *= gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne + r_ik_tag_minus_a_in_mOne);
return lamda_Si*exponent*cosJik_plus_oneThird*cosJik_plus_oneThird;
}
__host__ __device__ double hj(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIjk_plus_oneThird = ((r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag - r_ik_tag*r_ik_tag)/(2.0 * r_ij_tag * r_jk_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = r_ij_tag - a_Si;
r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a_in_mOne);
r_ij_tag_minus_a_in_mOne *= gama_Si;
double r_jk_tag_minus_a_in_mOne = r_jk_tag - a_Si;
r_jk_tag_minus_a_in_mOne = (1.0/r_jk_tag_minus_a_in_mOne);
r_jk_tag_minus_a_in_mOne *= gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne + r_jk_tag_minus_a_in_mOne);
return lamda_Si*exponent*cosIjk_plus_oneThird*cosIjk_plus_oneThird;
}
__host__ __device__ double hk(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIkj_plus_oneThird = ((r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag - r_ij_tag*r_ij_tag)/(2.0 * r_ik_tag * r_jk_tag)) + (1.0/3.0);
double r_ik_tag_minus_a_in_mOne = r_ik_tag - a_Si;
r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a_in_mOne);
r_ik_tag_minus_a_in_mOne *= gama_Si;
double r_jk_tag_minus_a_in_mOne = r_jk_tag - a_Si;
r_jk_tag_minus_a_in_mOne = (1.0/r_jk_tag_minus_a_in_mOne);
r_jk_tag_minus_a_in_mOne *= gama_Si;
double exponent = exp(r_ik_tag_minus_a_in_mOne + r_jk_tag_minus_a_in_mOne);
return lamda_Si*exponent*cosIkj_plus_oneThird*cosIkj_plus_oneThird;
}
__host__ __device__ double f3(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double h_i = 0.0;
double h_j = 0.0;
double h_k = 0.0;
if(r_ij_tag < a_Si && r_ik_tag < a_Si)
{
h_i = hi(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_ij_tag < a_Si && r_jk_tag < a_Si)
{
h_j = hj(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ik_tag < a_Si)
{
h_k = hk(r_ij_tag,r_ik_tag,r_jk_tag);
}
return h_i + h_j + h_k;
}
__host__ __device__ double v3(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
return f3(r_ij_tag,r_ik_tag,r_jk_tag)*epsilon_Si;
}
//----------------------------------------------------------------------------//
|
7a87eeb37fb1c529a4b0dc33726662c28f62dda1.cu
|
/*
*
*/
#include "Calculations.h"
__host__ __device__ double distance2(real3 i, real3 j)
{
return sqrt((i.x-j.x)*(i.x-j.x) + (i.y-j.y)*(i.y-j.y) + (i.z-j.z)*(i.z-j.z));
}
//-------------------------- calculate Force Si ------------------------------//
__global__ void
__launch_bounds__(1024, 4)
d_calculateForce_Si(int MAX_SI_NEIGHBORS, int MAX_XE_NEIGHBORS, particleStruct* siParticles, particleStruct* siParticles2, particleStruct* xeParticles, int numOfSi, int numOfXe, bool USE_NEIGHBOR_LISTS, bool useLennardJonesPotentialForSi)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < numOfSi)
{
real3 iPosition;
real3 jPosition;
real3 kPosition;
double r_ij = 0.0;
double r_ik = 0.0;
double r_jk = 0.0;
iPosition = siParticles[idx].position;
siParticles[idx].force.x = 0.0;
siParticles[idx].force.y = 0.0;
siParticles[idx].force.z = 0.0;
for(int j = 0; j < numOfSi; j++)
{
if(j != idx)
{
jPosition = siParticles2[j].position;
r_ij = distance2(iPosition, jPosition);
if(r_ij/sigma_Si < a_Si)
{
siParticles[idx].force.x -= v2_derivative_of_rix(iPosition, jPosition, r_ij);
siParticles[idx].force.y -= v2_derivative_of_riy(iPosition, jPosition, r_ij);
siParticles[idx].force.z -= v2_derivative_of_riz(iPosition, jPosition, r_ij);
}
for(int k = 0; k < numOfSi; k++)
{
if(k != idx && k != j)
{
kPosition = siParticles2[k].position;
r_ik = distance2(iPosition, kPosition);
r_jk = distance2(jPosition, kPosition);
if((r_ij/sigma_Si < a_Si && r_ik/sigma_Si < a_Si) || (r_ij/sigma_Si < a_Si && r_jk/sigma_Si < a_Si) || (r_ik/sigma_Si < a_Si && r_jk/sigma_Si < a_Si))
{
siParticles[idx].force.x -= v3_derivative_of_rix(iPosition, jPosition, kPosition, r_ij, r_ik, r_jk);
siParticles[idx].force.x -= v3_derivative_of_rix(iPosition, kPosition, jPosition, r_ik, r_ij, r_jk);
siParticles[idx].force.y -= v3_derivative_of_riy(iPosition, jPosition, kPosition, r_ij, r_ik, r_jk);
siParticles[idx].force.y -= v3_derivative_of_riy(iPosition, kPosition, jPosition, r_ik, r_ij, r_jk);
siParticles[idx].force.z -= v3_derivative_of_riz(iPosition, jPosition, kPosition, r_ij, r_ik, r_jk);
siParticles[idx].force.z -= v3_derivative_of_riz(iPosition, kPosition, jPosition, r_ik, r_ij, r_jk);
}
}
}
}
}
}
/* for(int j = 0; j < countXe; j++)
{
jPosition = xeParticles[j].position;
jPosition.x += 0.25*config->SI_LENGTH*space_Si;
jPosition.y += 0.25*config->SI_LENGTH*space_Si;
jPosition.z += config->SI_HEIGHT+config->LA_SPACE;
r_ij = distance2(iPosition, jPosition);
if(r_ij/sigma_Si_Xe < a_Si_Xe)
{
siParticles[i].force.x += ((iPosition.x-jPosition.x)*(lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe)));//-lennardJonesForce(2.5*sigma_Si,sigma_Si_Xe,epsilon_Si_Xe)));
siParticles[i].force.y += ((iPosition.y-jPosition.y)*(lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe)));//-lennardJonesForce(2.5*sigma_Si,sigma_Si_Xe,epsilon_Si_Xe)));
siParticles[i].force.z += ((iPosition.z-jPosition.z)*(lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe)));//-lennardJonesForce(2.5*sigma_Si,sigma_Si_Xe,epsilon_Si_Xe)));
}
}*/
// }
}
//----------------------------------------------------------------------------//
__global__ void
__launch_bounds__(1024, 4)
d_calculateForce_Xe(int MAX_SI_NEIGHBORS, int MAX_XE_NEIGHBORS, particleStruct* xeParticles, particleStruct* xeParticles2, particleStruct* siParticles, int numOfSi, int numOfXe, bool USE_NEIGHBOR_LISTS)
{
// extern __shared__ particleStruct* sharedXe[];
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < numOfXe)
{
real3 iPosition;
real3 jPosition;
double r_ij = 0.0;
iPosition = xeParticles[idx].position;
xeParticles[idx].force.x = 0.0;
xeParticles[idx].force.y = 0.0;
xeParticles[idx].force.z = 0.0;
for(int j = 0; j < MAX_XE_NEIGHBORS && xeParticles[idx].xeNeighbors[j] != -1; j++)
{
if(xeParticles[idx].xeNeighbors[j] != idx)
{
jPosition = xeParticles[xeParticles[idx].xeNeighbors[j]].position;
r_ij = distance2(iPosition, jPosition);
if(r_ij/sigma_Xe_Xe < xe_Cluster)
{
xeParticles[idx].force.x += (iPosition.x-jPosition.x)*lennardJonesForce(r_ij,sigma_Xe_Xe,epsilon_Xe_Xe);
xeParticles[idx].force.y += (iPosition.y-jPosition.y)*lennardJonesForce(r_ij,sigma_Xe_Xe,epsilon_Xe_Xe);
xeParticles[idx].force.z += (iPosition.z-jPosition.z)*lennardJonesForce(r_ij,sigma_Xe_Xe,epsilon_Xe_Xe);
}
}
}
}
/* for(int j = 0; j < countSi; j++)
{
jPosition = siParticles[j].position;
r_ij = distance2(iPosition, jPosition);
if(r_ij/sigma_Si_Xe < a_Si_Xe)
{
xeParticles[i].force.x += (iPosition.x-jPosition.x)*lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe);
xeParticles[i].force.y += (iPosition.y-jPosition.y)*lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe);
xeParticles[i].force.z += (iPosition.z-jPosition.z)*lennardJonesForce(r_ij,sigma_Si_Xe,epsilon_Si_Xe);
}
}*/
// }
}
//////////////////////////////////////////////////////////////////////////////////////////////!!!!
//----------------------calculate total potential of Si -----------------------//
/*__global__ double d_V_total_Si(int SI_PARTICLES, particleStruct* siParticles)
{
}
//----------------------------------------------------------------------------//
__global__ double d_V_total_Si_Xe(int SI_PARTICLES, particleStruct* siParticles, int XE_PARTICLES, particleStruct* xeParticles)
{
}
__global__ double d_V_total_Xe(int XE_PARTICLES, particleStruct* xeParticles)
{
}
__global__ void d_V_total(int SI_PARTICLES, particleStruct* siParticles, int XE_PARTICLES, particleStruct* xeParticles)
{
}*/
//////////////////////////////////////////////////////////////////////////////////////////////!!!!
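// Seeds the acceleration (the first derivative used by the predictor-corrector
// integrator below) from the freshly computed forces via a = F/m.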
__global__ void d_initiateAcceleration(particleStruct *particles, int listSize, double mass)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < listSize)
{
particles[idx].aAcc.x = particles[idx].force.x/mass;
particles[idx].aAcc.y = particles[idx].force.y/mass;
particles[idx].aAcc.z = particles[idx].force.z/mass;
}
}
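// Predictor step of a Gear-style predictor-corrector integrator: position,
// velocity and the stored higher derivatives (aAcc, bAcc, cAcc) are advanced
// by a Taylor expansion in dt. Every thread writes the same values into the
// shared coefficients, so the unsynchronized writes are benign in practice.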
__global__ void d_predict(particleStruct *particles, int listSize, float dt)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double c1;
__shared__ double c2;
__shared__ double c3;
__shared__ double c4;
c1 = dt;
c2 = c1*dt/2.0;
c3 = c2*dt/3.0;
c4 = c3*dt/4.0;
if(idx < listSize)
{
particles[idx].position.x += c1*particles[idx].velocity.x + c2*particles[idx].aAcc.x + c3*particles[idx].bAcc.x + c4*particles[idx].cAcc.x;
particles[idx].position.y += c1*particles[idx].velocity.y + c2*particles[idx].aAcc.y + c3*particles[idx].bAcc.y + c4*particles[idx].cAcc.y;
particles[idx].position.z += c1*particles[idx].velocity.z + c2*particles[idx].aAcc.z + c3*particles[idx].bAcc.z + c4*particles[idx].cAcc.z;
particles[idx].velocity.x += c1*particles[idx].aAcc.x + c2*particles[idx].bAcc.x + c3*particles[idx].cAcc.x;
particles[idx].velocity.y += c1*particles[idx].aAcc.y + c2*particles[idx].bAcc.y + c3*particles[idx].cAcc.y;
particles[idx].velocity.z += c1*particles[idx].aAcc.z + c2*particles[idx].bAcc.z + c3*particles[idx].cAcc.z;
particles[idx].aAcc.x += c1*particles[idx].bAcc.x + c2*particles[idx].cAcc.x;
particles[idx].aAcc.y += c1*particles[idx].bAcc.y + c2*particles[idx].cAcc.y;
particles[idx].aAcc.z += c1*particles[idx].bAcc.z + c2*particles[idx].cAcc.z;
particles[idx].bAcc.x += c1*particles[idx].cAcc.x;
particles[idx].bAcc.y += c1*particles[idx].cAcc.y;
particles[idx].bAcc.z += c1*particles[idx].cAcc.z;
}
}
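// Corrector step: the acceleration implied by the newly evaluated forces is
// compared with the predicted one, and the error is distributed over position,
// velocity and the higher derivatives using the GEAR1..GEAR4 weights.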
__global__ void d_correct(particleStruct *particles, double dt, int listSize, double mass)
{
__shared__ double c1;
__shared__ double c2;
__shared__ double c3;
__shared__ double c4;
__shared__ double cr;
__shared__ double cv;
__shared__ double cb;
__shared__ double cc;
c1 = dt ;
c2 = c1*dt/2.0;
c3 = c2*dt/3.0;
c4 = c3*dt/4.0;
cr = GEAR1*c2;
cv = GEAR2*c2/c1;
cb = GEAR3*c2/c3;
cc = GEAR4*c2/c4;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < listSize)
{
double axi = particles[idx].force.x/mass;
double ayi = particles[idx].force.y/mass;
double azi = particles[idx].force.z/mass;
double corrx = axi - particles[idx].aAcc.x;
double corry = ayi - particles[idx].aAcc.y;
double corrz = azi - particles[idx].aAcc.z;
particles[idx].position.x += cr*corrx;
particles[idx].position.y += cr*corry;
particles[idx].position.z += cr*corrz;
particles[idx].velocity.x += cv*corrx;
particles[idx].velocity.y += cv*corry;
particles[idx].velocity.z += cv*corrz;
particles[idx].aAcc.x = axi;
particles[idx].aAcc.y = ayi;
particles[idx].aAcc.z = azi;
particles[idx].bAcc.x += cb*corrx;
particles[idx].bAcc.y += cb*corry;
particles[idx].bAcc.z += cb*corrz;
particles[idx].cAcc.x += cc*corrx;
particles[idx].cAcc.y += cc*corry;
particles[idx].cAcc.z += cc*corrz;
}
}
//------------------------Lennard Jones Potential -----------------------------//
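// Returns (1/r)*(-dV/dr) for the 12-6 Lennard-Jones potential, i.e.
// 24*eps/sigma^2 * (2*(sigma/r)^14 - (sigma/r)^8); callers multiply by the
// coordinate difference (x_i - x_j) to obtain each Cartesian force component.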
__host__ __device__ double lennardJonesForce(double dist, double sig, double eps)
{
double sigsq = sig*sig;
double con = 24.0*eps/sigsq;
double dist2 = dist * dist;
dist2 /= sigsq;
double dist4 = dist2*dist2;
double dist8 = dist4*dist4;
double dist14 = dist2*dist4*dist8;
double invdist8= 1.0/dist8;
double invdist14= 1.0/dist14;
double s = 2.0f*invdist14-invdist8;
return s * con;
}
//----------------------------------------------------------------------------//
__host__ __device__ double lennardJonesPotential(double dist, double sig, double eps)
{
double expr = sig/dist;
double expr2 = expr*expr;
double expr4 = expr2*expr2;
double expr6 = expr4*expr2;
double expr12 = expr6*expr6;
return 4.0*eps*(expr12-expr6);
}
//-------------------- force between two Si particles ---------------------//
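// Derivative of the Stillinger-Weber-style two-body term
// f2(r') = A*(B*r'^-4 - 1)*exp(1/(r'-a)) with respect to the reduced distance
// r' = r/sigma; the v2_derivative_of_ri{x,y,z} helpers below scale it by
// epsilon/sigma and project it onto each coordinate.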
__host__ __device__ double f2_derivative_of_rij_tag(double r_ij_tag)
{
double r_ij_tag_minus_a = r_ij_tag - a_Si;//r'ij-a
double r_ij_tag_minus_a2 = r_ij_tag_minus_a*r_ij_tag_minus_a;
double r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a);//(r'ij-a)^(-1)
double r_ij_tag_minus_a_in_mTwo = (1.0/(r_ij_tag_minus_a2));//(r'ij-a)^(-2)
double r_ij_tag2 = r_ij_tag*r_ij_tag;
double r_ij_tag4 = r_ij_tag2*r_ij_tag2;
double r_ij_tag5 = r_ij_tag4*r_ij_tag;
double r_ij_tag_in_mFive = (1.0/(r_ij_tag5));//r'ij^(-5)
double r_ij_tag_in_mFour = (1.0/(r_ij_tag4));//r'ij^(-4)
double expression = B_Si * r_ij_tag_in_mFour;
expression = expression - 1.0;//(B*r'ij^(-4) - 1)
double exponent = exp(r_ij_tag_minus_a_in_mOne);
double f2_derivative_part_1 = -4.0 * B_Si * r_ij_tag_in_mFive;
double f2_derivative_part_2 = expression * r_ij_tag_minus_a_in_mTwo;
return A_Si*exponent*(f2_derivative_part_1 - f2_derivative_part_2);
}
__host__ __device__ double v2_derivative_of_rix(real3 i, real3 j, double r_ij)
{
if(r_ij/sigma_Si == pow(2.0,1.0/6.0))
{
return 0;
}
double f2_derivative = f2_derivative_of_rij_tag(r_ij/sigma_Si);
f2_derivative = f2_derivative * (epsilon_Si/sigma_Si);//v2 derivative of distance
double dist_x = (i.x - j.x);
dist_x = dist_x / (r_ij);
double v2_derivative = f2_derivative * dist_x;
return v2_derivative;
}
__host__ __device__ double v2_derivative_of_riy(real3 i, real3 j, double r_ij)
{
if(r_ij/sigma_Si == pow(2.0,1.0/6.0))
{
return 0;
}
double f2_derivative = f2_derivative_of_rij_tag(r_ij/sigma_Si);
f2_derivative = f2_derivative * (epsilon_Si/sigma_Si);
double dist_y = i.y - j.y;
dist_y = dist_y / (r_ij);
double v2_derivative = f2_derivative * dist_y;
return v2_derivative;
}
__host__ __device__ double v2_derivative_of_riz(real3 i, real3 j, double r_ij)
{
if(r_ij/sigma_Si == pow(2.0,1.0/6.0))
{
return 0;
}
double f2_derivative = f2_derivative_of_rij_tag(r_ij/sigma_Si);
f2_derivative = f2_derivative * (epsilon_Si/sigma_Si);
double dist_z = i.z - j.z;
dist_z = dist_z / (r_ij);
double v2_derivative = f2_derivative * dist_z;
return v2_derivative;
}
//----------------------------------------------------------------------------//
//-------------------- potential between two Si particles ---------------------//
__host__ __device__ double f2(double r_ij_tag)
{
if(r_ij_tag >= a_Si)
{
return 0;
}
double r_ij_tag_minus_a_in_mOne = r_ij_tag - a_Si;
r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a_in_mOne);
double exponent = exp(r_ij_tag_minus_a_in_mOne);
double r_ij_tag2 = r_ij_tag*r_ij_tag;
double r_ij_tag4 = r_ij_tag2*r_ij_tag2;
double expression = (1.0/(r_ij_tag4));
expression *= B_Si;
expression -= 1.0;
return A_Si*expression*exponent;
}
__host__ __device__ double v2(double r_ij_tag)
{
if(r_ij_tag == pow(2.0,1.0/6.0))
{
return -epsilon_Si;
}
return f2(r_ij_tag)*epsilon_Si;
}
//----------------------------------------------------------------------------//
//------------------------ force between three Si particles -------------------//
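// Partial derivatives of the three-body terms h_i, h_j, h_k with respect to
// the reduced pair distances. f3_derivative_of_r{ij,ik}_tag sum the pieces
// that survive the a_Si cutoff, and v3_derivative_of_ri{x,y,z} combine them
// into Cartesian force components on particle i via the chain rule.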
__host__ __device__ double hi_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosJik_plus_oneThird = ((r_ij_tag*r_ij_tag + r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag)/(2.0 * r_ij_tag * r_ik_tag)) + (1.0/3.0);
double r_ij_tag_minus_a = r_ij_tag - a_Si;
double r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a) * gama_Si;
double r_ij_tag_minus_a_in_mTwo = (1.0/(r_ij_tag_minus_a*r_ij_tag_minus_a)) * gama_Si;
double r_ik_tag_minus_a_in_mOne = (1.0/(r_ik_tag - a_Si)) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_ik_tag_minus_a_in_mOne);
double expression = (r_ij_tag*r_ij_tag - r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag) / (r_ij_tag*r_ij_tag * r_ik_tag);
expression -= (r_ij_tag_minus_a_in_mTwo*cosJik_plus_oneThird);
return lamda_Si*exponent*cosJik_plus_oneThird*expression;
}
__host__ __device__ double hi_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosJik_plus_oneThird = ((r_ij_tag*r_ij_tag + r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag)/(2.0 * r_ij_tag * r_ik_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = (1.0/(r_ij_tag - a_Si)) * gama_Si;
double r_ik_tag_minus_a = r_ik_tag - a_Si;
double r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a) * gama_Si;
double r_ik_tag_minus_a_in_mTwo = (1.0/(r_ik_tag_minus_a*r_ik_tag_minus_a)) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_ik_tag_minus_a_in_mOne);
double expression = (r_ik_tag*r_ik_tag - r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag) / (r_ik_tag*r_ik_tag * r_ij_tag);
expression -= (r_ik_tag_minus_a_in_mTwo*cosJik_plus_oneThird);
return lamda_Si*exponent*cosJik_plus_oneThird*expression;
}
__host__ __device__ double hj_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIjk_plus_oneThird = ((r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag - r_ik_tag*r_ik_tag)/(2.0 * r_ij_tag * r_jk_tag)) + (1.0/3.0);
double r_ij_tag_minus_a = r_ij_tag - a_Si;
double r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a) * gama_Si;
double r_ij_tag_minus_a_in_mTwo = (1.0/(r_ij_tag_minus_a*r_ij_tag_minus_a)) * gama_Si;
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (r_ij_tag*r_ij_tag - r_jk_tag*r_jk_tag + r_ik_tag*r_ik_tag) / (r_ij_tag*r_ij_tag * r_jk_tag);
expression -= (r_ij_tag_minus_a_in_mTwo*cosIjk_plus_oneThird);
return lamda_Si*exponent*cosIjk_plus_oneThird*expression;
}
__host__ __device__ double hj_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIjk_plus_oneThird = ((r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag - r_ik_tag*r_ik_tag)/(2.0 * r_ij_tag * r_jk_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = (1.0/(r_ij_tag - a_Si)) * gama_Si;
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (-r_ik_tag) / (r_ij_tag * r_jk_tag);
return lamda_Si*exponent*2.0*cosIjk_plus_oneThird*expression;
}
__host__ __device__ double hk_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIkj_plus_oneThird = ((r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag - r_ij_tag*r_ij_tag)/(2.0 * r_ik_tag * r_jk_tag)) + (1.0/3.0);
double r_ik_tag_minus_a_in_mOne = (1.0/(r_ik_tag - a_Si)) * gama_Si;
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double exponent = exp(r_ik_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (-r_ij_tag) / (r_ik_tag * r_jk_tag);
return lamda_Si*exponent*2.0*cosIkj_plus_oneThird*expression;
}
__host__ __device__ double hk_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIkj_plus_oneThird = ((r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag - r_ij_tag*r_ij_tag)/(2.0 * r_ik_tag * r_jk_tag)) + (1.0/3.0);
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double r_ik_tag_minus_a = r_ik_tag - a_Si;
double r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a) * gama_Si;
double r_ik_tag_minus_a_in_mTwo = (1.0/(r_ik_tag_minus_a*r_ik_tag_minus_a)) * gama_Si;
double exponent = exp(r_ik_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag + r_ij_tag*r_ij_tag) / (r_ik_tag*r_ik_tag * r_jk_tag);
expression -= (r_ik_tag_minus_a_in_mTwo*cosIkj_plus_oneThird);
return lamda_Si*exponent*cosIkj_plus_oneThird*expression;
}
__host__ __device__ double f3_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double hi_derivative_of_rij = 0.0;
double hj_derivative_of_rij = 0.0;
double hk_derivative_of_rij = 0.0;
if(r_ij_tag < a_Si && r_ik_tag < a_Si)
{
hi_derivative_of_rij = hi_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ij_tag < a_Si)
{
hj_derivative_of_rij = hj_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ik_tag < a_Si)
{
hk_derivative_of_rij = hk_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
return hi_derivative_of_rij + hj_derivative_of_rij + hk_derivative_of_rij;
}
__host__ __device__ double f3_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double hi_derivative_of_rik = 0.0;
double hj_derivative_of_rik = 0.0;
double hk_derivative_of_rik = 0.0;
if(r_ik_tag < a_Si && r_ij_tag < a_Si)
{
hi_derivative_of_rik = hi_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ij_tag < a_Si)
{
hj_derivative_of_rik = hj_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ik_tag < a_Si)
{
hk_derivative_of_rik = hk_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
return hi_derivative_of_rik + hj_derivative_of_rik + hk_derivative_of_rik;
}
__host__ __device__ double v3_derivative_of_rix(real3 i, real3 j, real3 k, double r_ij, double r_ik, double r_jk)
{
double v3_derived_by_rij = (f3_derivative_of_rij_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double v3_derived_by_rik = (f3_derivative_of_rik_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double dist_ijx = (i.x-j.x);
double dist_ikx = (i.x-k.x);
double expression1 = (dist_ijx/(r_ij));
double expression2 = (dist_ikx/(r_ik));
return v3_derived_by_rij*expression1 + v3_derived_by_rik*expression2;
}
__host__ __device__ double v3_derivative_of_riy(real3 i, real3 j, real3 k, double r_ij, double r_ik, double r_jk)
{
double v3_derived_by_rij = (f3_derivative_of_rij_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double v3_derived_by_rik = (f3_derivative_of_rik_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double dist_ijy = (i.y-j.y);
double dist_iky = (i.y-k.y);
double expression1 = (dist_ijy/(r_ij));
double expression2 = (dist_iky/(r_ik));
return v3_derived_by_rij*expression1 + v3_derived_by_rik*expression2;
}
__host__ __device__ double v3_derivative_of_riz(real3 i, real3 j, real3 k, double r_ij, double r_ik, double r_jk)
{
double v3_derived_by_rij = (f3_derivative_of_rij_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double v3_derived_by_rik = (f3_derivative_of_rik_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double dist_ijz = (i.z-j.z);
double dist_ikz = (i.z-k.z);
double expression1 = (dist_ijz/(r_ij));
double expression2 = (dist_ikz/(r_ik));
return v3_derived_by_rij*expression1 + v3_derived_by_rik*expression2;
}
//----------------------------------------------------------------------------//
//-------------------- potential between three Si particles -------------------//
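// Three-body terms of the reduced potential: each h is
// lamda * exp(gama/(r1 - a) + gama/(r2 - a)) * (cos(theta) + 1/3)^2,
// where theta is the bond angle at the central atom (i, j, or k respectively).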
__host__ __device__ double hi(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosJik_plus_oneThird = ((r_ij_tag*r_ij_tag + r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag)/(2.0 * r_ij_tag * r_ik_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = r_ij_tag - a_Si;
r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a_in_mOne);
r_ij_tag_minus_a_in_mOne *= gama_Si;
double r_ik_tag_minus_a_in_mOne = r_ik_tag - a_Si;
r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a_in_mOne);
r_ik_tag_minus_a_in_mOne *= gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne + r_ik_tag_minus_a_in_mOne);
return lamda_Si*exponent*cosJik_plus_oneThird*cosJik_plus_oneThird;
}
__host__ __device__ double hj(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIjk_plus_oneThird = ((r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag - r_ik_tag*r_ik_tag)/(2.0 * r_ij_tag * r_jk_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = r_ij_tag - a_Si;
r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a_in_mOne);
r_ij_tag_minus_a_in_mOne *= gama_Si;
double r_jk_tag_minus_a_in_mOne = r_jk_tag - a_Si;
r_jk_tag_minus_a_in_mOne = (1.0/r_jk_tag_minus_a_in_mOne);
r_jk_tag_minus_a_in_mOne *= gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne + r_jk_tag_minus_a_in_mOne);
return lamda_Si*exponent*cosIjk_plus_oneThird*cosIjk_plus_oneThird;
}
__host__ __device__ double hk(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIkj_plus_oneThird = ((r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag - r_ij_tag*r_ij_tag)/(2.0 * r_ik_tag * r_jk_tag)) + (1.0/3.0);
double r_ik_tag_minus_a_in_mOne = r_ik_tag - a_Si;
r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a_in_mOne);
r_ik_tag_minus_a_in_mOne *= gama_Si;
double r_jk_tag_minus_a_in_mOne = r_jk_tag - a_Si;
r_jk_tag_minus_a_in_mOne = (1.0/r_jk_tag_minus_a_in_mOne);
r_jk_tag_minus_a_in_mOne *= gama_Si;
double exponent = exp(r_ik_tag_minus_a_in_mOne + r_jk_tag_minus_a_in_mOne);
return lamda_Si*exponent*cosIkj_plus_oneThird*cosIkj_plus_oneThird;
}
__host__ __device__ double f3(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double h_i = 0.0;
double h_j = 0.0;
double h_k = 0.0;
if(r_ij_tag < a_Si && r_ik_tag < a_Si)
{
h_i = hi(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_ij_tag < a_Si && r_jk_tag < a_Si)
{
h_j = hj(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ik_tag < a_Si)
{
h_k = hk(r_ij_tag,r_ik_tag,r_jk_tag);
}
return h_i + h_j + h_k;
}
__host__ __device__ double v3(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
return f3(r_ij_tag,r_ik_tag,r_jk_tag)*epsilon_Si;
}
//----------------------------------------------------------------------------//
|
cdf63b21a6f3545786599946e36b2b32cfcb369e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_2_left;
int xdim0_update_halo_kernel5_plus_2_left_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_2_left;
int ydim0_update_halo_kernel5_plus_2_left_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_2_left;
int xdim1_update_halo_kernel5_plus_2_left_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_2_left;
int ydim1_update_halo_kernel5_plus_2_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_2_left*(y)+xdim0_update_halo_kernel5_plus_2_left*ydim0_update_halo_kernel5_plus_2_left*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_2_left*(y)+xdim1_update_halo_kernel5_plus_2_left*ydim1_update_halo_kernel5_plus_2_left*(z))
//user function
__device__
inline void update_halo_kernel5_plus_2_left_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(2,0,0)]);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(2,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_2_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_2_left + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_2_left * ydim0_update_halo_kernel5_plus_2_left;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_2_left + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_2_left * ydim1_update_halo_kernel5_plus_2_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_2_left_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_2_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,89)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_2_left");
OPS_kernels[89].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_2_left_h || ydim0 != ydim0_update_halo_kernel5_plus_2_left_h || xdim1 != xdim1_update_halo_kernel5_plus_2_left_h || ydim1 != ydim1_update_halo_kernel5_plus_2_left_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel5_plus_2_left, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_2_left_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel5_plus_2_left, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_2_left_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel5_plus_2_left, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_2_left_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel5_plus_2_left, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_2_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_2_left), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[89].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 89;
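// djb2-style rolling hash over the loop index, iteration range and argument
// dats, used to identify this kernel instance when it is enqueued lazily.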
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 89;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_2_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_2_left");
}
ops_enqueue_kernel(desc);
}
#endif
|
cdf63b21a6f3545786599946e36b2b32cfcb369e.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_2_left;
int xdim0_update_halo_kernel5_plus_2_left_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_2_left;
int ydim0_update_halo_kernel5_plus_2_left_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_2_left;
int xdim1_update_halo_kernel5_plus_2_left_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_2_left;
int ydim1_update_halo_kernel5_plus_2_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_2_left*(y)+xdim0_update_halo_kernel5_plus_2_left*ydim0_update_halo_kernel5_plus_2_left*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_2_left*(y)+xdim1_update_halo_kernel5_plus_2_left*ydim1_update_halo_kernel5_plus_2_left*(z))
//user function
__device__
inline void update_halo_kernel5_plus_2_left_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(2,0,0)]);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(2,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_2_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_2_left + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_2_left * ydim0_update_halo_kernel5_plus_2_left;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_2_left + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_2_left * ydim1_update_halo_kernel5_plus_2_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_2_left_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_2_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,89)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_2_left");
OPS_kernels[89].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_2_left_h || ydim0 != ydim0_update_halo_kernel5_plus_2_left_h || xdim1 != xdim1_update_halo_kernel5_plus_2_left_h || ydim1 != ydim1_update_halo_kernel5_plus_2_left_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel5_plus_2_left, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_2_left_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel5_plus_2_left, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_2_left_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel5_plus_2_left, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_2_left_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel5_plus_2_left, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_2_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel5_plus_2_left<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[89].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 89;
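// djb2-style rolling hash over the loop index, iteration range and argument
// dats, used to identify this kernel instance when it is enqueued lazily.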
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 89;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_2_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_2_left");
}
ops_enqueue_kernel(desc);
}
#endif
|
cd48f9429ce9097535054ff4ca81b97f19a94c1a.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by Mark Van der Merwe, Summer 2018
//
#include "infer.h"
#include "infer_data.h"
#include "inference_helpers.h"
#include "../header.h"
#include "math_functions.h"
#include <stdio.h>
#include <cub.cuh>
#include <iostream>
#include <cmath>
#include "hip/hip_runtime_api.h"
#include <ctime>
// edge_frontier - the next frontier of edges to update for the ith step.
// edges_effected - the total frontier of updated edges for this round.
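// Marks every edge outgoing from the given item (a node or an edge) as part of
// the next update frontier, records it (and optionally its paired reverse edge)
// in the set of edges whose residuals must be refreshed, and flags the
// destination nodes for a residual update.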
__device__ void advance_edge_frontier(int* edge_frontier, int* edges_effected, int* nodes_effected, int* item_to_outgoing_idx, int* outgoing, int id, int* edge_idx_to_dest_node_idx, bool include_inverse) {
int start_outgoing_idx = item_to_outgoing_idx[id];
int end_outgoing_idx = item_to_outgoing_idx[id + 1];
for (int outgoing_idx = start_outgoing_idx; outgoing_idx < end_outgoing_idx; ++outgoing_idx) {
int edge_id = outgoing[outgoing_idx];
edge_frontier[edge_id] = 1;
// For edges effected, we want to also add in our inverse edge.
if (include_inverse) {
int inverse_edge_id = edge_id % 2 == 0 ? edge_id + 1 : edge_id - 1;
edges_effected[inverse_edge_id] = 1;
}
edges_effected[edge_id] = 1;
nodes_effected[edge_idx_to_dest_node_idx[edge_id]] = 1;
}
}
__global__ void rs_node_select(device_graph d_graph, device_pgm d_pgm, int* node_ids, int* nodes_effected, int* frontier, int* edges_effected, int node_count, int edge_count) {
// Determine the node this thread will be responsible for.
int node = (blockIdx.x * blockDim.x) + threadIdx.x;
if (node < node_count) {
// Map the node number to the actual node id.
int node_id = node_ids[node];
// Determine the edges outgoing from this node.
advance_edge_frontier(frontier, edges_effected, nodes_effected, d_graph.node_idx_to_outgoing_edges, d_graph.node_outgoing_edges, node_id, d_graph.edge_idx_to_dest_node_idx, true);
// Mark this node as needing it's residual updated.
nodes_effected[node_id] = 1;
}
}
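// Expands the frontier by one hop: every edge set in last_frontier marks the
// edges that consume its output for the next update step and records them in
// the accumulated effected-edge set.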
__global__ void rs_generate_next_frontier(device_graph d_graph, device_pgm d_pgm, int* nodes_effected, int* last_frontier, int* frontier, int* edges_effected, int edge_count, bool include_inverse) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int step = gridDim.x * blockDim.x;
for (int edge_id = idx; edge_id < edge_count; edge_id += step) {
if (last_frontier[edge_id] == 1) {
// Extend the edges effected to include this one's neighbors.
advance_edge_frontier(frontier, edges_effected, nodes_effected, d_graph.edge_idx_to_outgoing_edges, d_graph.edge_outgoing_edges, edge_id, d_graph.edge_idx_to_dest_node_idx, include_inverse);
}
}
}
__global__ void rs_calculate_updates(device_graph d_graph, device_pgm d_pgm, int* frontier, int edge_count, bool forward) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int step = gridDim.x * blockDim.x;
for (int edge_id = idx; edge_id < edge_count; edge_id += step) {
if (frontier[edge_id] == 1) {
int actual_edge_id;
if (!forward) {
// If we are doing the message collection phase, we need to swap the edge id to be the opposite direction edge.
actual_edge_id = edge_id % 2 == 0 ? edge_id + 1 : edge_id - 1;
} else {
actual_edge_id = edge_id;
}
// Compute the edge.
compute_message(d_graph, d_pgm, actual_edge_id);
}
}
}
__global__ void rs_compute_edge_residuals(device_graph d_graph, device_pgm d_pgm, double* edge_residuals, int* edges_effected, int edge_count) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int step = gridDim.x * blockDim.x;
for (int edge_id = idx; edge_id < edge_count; edge_id += step) {
if (edges_effected[edge_id] == 1) {
// Compute the edge once more. TODO: Cache updates.
compute_message(d_graph, d_pgm, edge_id);
// Now compute the residual from this.
double edge_residual = message_delta(d_pgm.edges, d_pgm.workspace, d_pgm.edge_idx_to_edges_idx[edge_id]);
edge_residuals[edge_id] = edge_residual;
}
// Clear edges effected.
edges_effected[edge_id] = 0;
}
}
__global__ void rs_compute_node_residuals(device_graph d_graph, device_pgm d_pgm, double* node_residuals, double* edge_residuals, int* nodes_effected, int node_count) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int step = gridDim.x * blockDim.x;
for (int node_id = idx; node_id < node_count; node_id += step) {
if (nodes_effected[node_id] == 1) {
// Compute residual of this node.
// Residual is defined as the max of the residuals of the incoming messages.
double residual = 0.0;
int start_incoming_idx = d_graph.node_idx_to_incoming_edges[node_id];
int end_incoming_idx = d_graph.node_idx_to_incoming_edges[node_id + 1];
for (int incoming_idx = start_incoming_idx; incoming_idx < end_incoming_idx; ++incoming_idx) {
double edge_residual = edge_residuals[d_graph.node_incoming_edges[incoming_idx]];
if (edge_residual > residual)
residual = edge_residual;
}
node_residuals[node_id] = residual;
}
// Clear effected nodes.
nodes_effected[node_id] = 0;
}
}
std::tuple<float, std::vector<double>, int, std::vector<std::pair<int, int>>, std::vector<std::pair<float, int>>> infer(pgm* pgm, double epsilon, int timeout, std::vector<int> runtime_params, bool verbose) {
//
// Setup GPU data.
//
std::pair<device_graph, device_pgm> infer_data = setup_gpu_data(pgm);
int num_edges = pgm->num_edges();
int num_nodes = pgm->pgm_graph->node_idx_to_incoming_edges.size() - 1;
int edge_rep_size = pgm->edges.size();
if (verbose) {
std::cout << "Number of edges: " << num_edges << std::endl;
std::cout << "Number of nodes: " << num_nodes << std::endl;
}
// Size of each splash.
int h = 2;
// Create a residual array - each node gets a residual.
// At each round, to determine who to update, we perform a key-value sort and choose the top p keys.
double* d_node_residuals;
gpuErrchk(hipMalloc((void**) &d_node_residuals, num_nodes * sizeof(double)));
std::vector<double> node_residuals_(num_nodes, 10.0); // Start all node_residuals as 10.0, so all nodes eventually update.
gpuErrchk(hipMemcpy(d_node_residuals, node_residuals_.data(), num_nodes * sizeof(double), hipMemcpyHostToDevice));
double* d_node_residuals_out;
gpuErrchk(hipMalloc((void**) &d_node_residuals_out, num_nodes * sizeof(double)));
double* top_residual = (double*) malloc(sizeof(double));
std::vector<int> node_ids_;
for (int i = 0; i < num_nodes; ++i) {
node_ids_.push_back(i);
}
int* d_node_ids;
gpuErrchk(hipMalloc((void**) &d_node_ids, num_nodes * sizeof(int)));
gpuErrchk(hipMemcpy(d_node_ids, node_ids_.data(), num_nodes * sizeof(int), hipMemcpyHostToDevice));
// We also need residuals for our edges, which are used to compute the node residuals.
double* d_edge_residuals;
gpuErrchk(hipMalloc((void**) &d_edge_residuals, num_edges * sizeof(double)));
std::vector<double> edge_residuals_(num_edges, 10.0); // Start all edge_residuals as 10.0, so all edges eventually update.
gpuErrchk(hipMemcpy(d_edge_residuals, edge_residuals_.data(), num_edges * sizeof(double), hipMemcpyHostToDevice));
// Indicate whether a given node's residual should be updated after other nodes are updated.
int* d_node_effected;
gpuErrchk(hipMalloc((void**) &d_node_effected, num_nodes * sizeof(int)));
std::vector<int> node_effected_(num_nodes, 0);
gpuErrchk(hipMemcpy(d_node_effected, node_effected_.data(), num_nodes * sizeof(int), hipMemcpyHostToDevice));
// TODO: The list of effected nodes to update residuals. Dense approach.
// Create h frontiers that will represent the splash.
std::vector<int> edge_effected_(num_edges, 0);
std::vector<int*> d_frontiers;
for (int frontier_id = 0; frontier_id < h; ++frontier_id) {
int* d_frontier;
gpuErrchk(hipMalloc((void**) &d_frontier, num_edges * sizeof(int)));
gpuErrchk(hipMemcpy(d_frontier, edge_effected_.data(), num_edges * sizeof(int), hipMemcpyHostToDevice));
d_frontiers.push_back(d_frontier);
}
// We also want one final frontier that determines which edges need their residuals updated.
int* d_edges_effected;
gpuErrchk(hipMalloc((void**) &d_edges_effected, num_edges * sizeof(int)));
gpuErrchk(hipMemcpy(d_edges_effected, edge_effected_.data(), num_edges * sizeof(int), hipMemcpyHostToDevice));
// We will also need a workspace for this to avoid a race condition.
int* d_edges_effected_workspace;
gpuErrchk(hipMalloc((void**) &d_edges_effected_workspace, num_edges * sizeof(int)));
gpuErrchk(hipMemcpy(d_edges_effected_workspace, edge_effected_.data(), num_edges * sizeof(int), hipMemcpyHostToDevice));
//
// Setup GPU Runtime.
//
if (runtime_params.size() < 1) {
std::cout << "RS requires parallelism divisor a, where p = 1/2^a." << std::endl;
return std::tuple<float, std::vector<double>, int, std::vector<std::pair<int, int>>, std::vector<std::pair<float, int>>>(0.0,{},0,{},{});
}
float p = 1.0 / (float) ::pow(2, runtime_params[0]);
// Determine grid/block sizes using CUDA Occupancy calculators.
int minGridSize;
int blockSizeSelect;
int gridSizeSelect;
int nodes_to_update = (int) (num_nodes * p);
nodes_to_update = nodes_to_update == 0 ? 1 : nodes_to_update;
gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSizeSelect, rs_node_select, 0, 0));
scale_launch_params(&gridSizeSelect, &blockSizeSelect, nodes_to_update);
int blockSizeFrontier;
int gridSizeFrontier;
gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSizeFrontier, rs_generate_next_frontier, 0, 0));
scale_launch_params(&gridSizeFrontier, &blockSizeFrontier, num_edges);
int blockSizeCalcUpdates;
int gridSizeCalcUpdates;
gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSizeCalcUpdates, rs_calculate_updates, 0, 0));
scale_launch_params(&gridSizeCalcUpdates, &blockSizeCalcUpdates, num_edges);
int blockSizeEdgeResiduals;
int gridSizeEdgeResiduals;
gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSizeEdgeResiduals, rs_compute_edge_residuals, 0, 0));
scale_launch_params(&gridSizeEdgeResiduals, &blockSizeEdgeResiduals, num_edges);
int blockSizeNodeResiduals;
int gridSizeNodeResiduals;
gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSizeNodeResiduals, rs_compute_node_residuals, 0, 0));
scale_launch_params(&gridSizeNodeResiduals, &blockSizeNodeResiduals, num_nodes);
// We instantiate here because we want to know the number of nodes we will be updating.
std::vector<int> node_ids_out_;
for (int i = 0; i < num_nodes; i += ceiling_division(num_nodes, nodes_to_update)) {
node_ids_out_.push_back(i);
}
int i = 1;
while(node_ids_out_.size() < num_nodes) {
node_ids_out_.push_back(i);
++i;
if (i % ceiling_division(num_nodes, nodes_to_update) == 0)
++i;
}
int* d_node_ids_out;
gpuErrchk(hipMalloc((void**) &d_node_ids_out, num_nodes * sizeof(int)));
gpuErrchk(hipMemcpy(d_node_ids_out, node_ids_out_.data(), num_nodes * sizeof(int), hipMemcpyHostToDevice));
// Determine temporary device storage requirements for sorting.
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_node_residuals, d_node_residuals_out, d_node_ids, d_node_ids_out, num_nodes);
// Allocate temporary storage
gpuErrchk(hipMalloc(&d_temp_storage, temp_storage_bytes));
//
// Setup runtime tracking.
//
// Time our code.
std::clock_t begin = std::clock();
std::clock_t since;
float time = 0.0;
// Set up profiling events.
hipEvent_t start, stop;
gpuErrchk(hipEventCreate(&start));
gpuErrchk(hipEventCreate(&stop));
gpuErrchk(hipEventRecord(start));
//
// Run.
//
int iterations = 0;
bool converged = false;
while(!converged && time < timeout) {
++iterations;
// Start by generating our frontiers for this run.
// First we select and mark the first frontier by selecting the top p nodes and adding their outgoing edges.
hipLaunchKernelGGL(( rs_node_select), dim3(gridSizeSelect), dim3(blockSizeSelect), 0, 0, infer_data.first, infer_data.second, d_node_ids_out, d_node_effected, d_frontiers[0], d_edges_effected, num_nodes, num_edges);
// Now we generate the next h-1 frontiers.
for (int frontier_id = 1; frontier_id < h; ++frontier_id) {
hipLaunchKernelGGL(( rs_generate_next_frontier), dim3(gridSizeFrontier), dim3(blockSizeFrontier), 0, 0, infer_data.first, infer_data.second, d_node_effected, d_frontiers[frontier_id - 1], d_frontiers[frontier_id], d_edges_effected, num_edges, true);
}
// Finally we need to extend our edges_effected by one more step.
// To do this we need to use our workspace to avoid a race condition.
// Start by copying the current edges effected into the workspace.
gpuErrchk(hipMemcpy(d_edges_effected_workspace, d_edges_effected, num_edges * sizeof(int), hipMemcpyDeviceToDevice));
    // This approach is a little hacky, but it avoids a lot of nearly duplicated code.
hipLaunchKernelGGL(( rs_generate_next_frontier), dim3(gridSizeFrontier), dim3(blockSizeFrontier), 0, 0, infer_data.first, infer_data.second, d_node_effected, d_edges_effected_workspace, d_edges_effected, d_edges_effected, num_edges, false);
// We start with a backwards pass through our frontiers. This is the collection phase, where we make the root node aware of the leaves.
for (int update = h; update > 0; --update) {
      // Operate on the specified frontier.
int* d_frontier = d_frontiers[update - 1];
hipLaunchKernelGGL(( rs_calculate_updates), dim3(gridSizeCalcUpdates), dim3(blockSizeCalcUpdates), 0, 0, infer_data.first, infer_data.second, d_frontier, num_edges, false);
// Set the edges equal to the workspace each time.
gpuErrchk(hipMemcpy(infer_data.second.edges, infer_data.second.workspace, edge_rep_size * sizeof(double), hipMemcpyDeviceToDevice));
}
// Now we do the forwards pass through our frontiers. This is the distribution phase, where we make the leaves aware of the root.
for (int update = 0; update < h; ++update) {
      // Operate on the specified frontier.
hipLaunchKernelGGL(( rs_calculate_updates), dim3(gridSizeCalcUpdates), dim3(blockSizeCalcUpdates), 0, 0, infer_data.first, infer_data.second, d_frontiers[update], num_edges, true);
// Set the edges equal to the workspace each time.
gpuErrchk(hipMemcpy(infer_data.second.edges, infer_data.second.workspace, edge_rep_size * sizeof(double), hipMemcpyDeviceToDevice));
}
// Compute the updated edge residuals.
hipLaunchKernelGGL(( rs_compute_edge_residuals), dim3(gridSizeEdgeResiduals), dim3(blockSizeEdgeResiduals), 0, 0, infer_data.first, infer_data.second, d_edge_residuals, d_edges_effected, num_edges);
gpuErrchk(hipMemcpy(infer_data.second.workspace, infer_data.second.edges, edge_rep_size * sizeof(double), hipMemcpyDeviceToDevice));
// Finally, compute the updated node residuals.
hipLaunchKernelGGL(( rs_compute_node_residuals), dim3(gridSizeNodeResiduals), dim3(blockSizeNodeResiduals), 0, 0, infer_data.first, infer_data.second, d_node_residuals, d_edge_residuals, d_node_effected, num_nodes);
// Clear frontiers. Edges effected and nodes effected are already cleared.
for (int frontier_id = 0; frontier_id < h; ++frontier_id) {
gpuErrchk(hipMemcpy(d_frontiers[frontier_id], edge_effected_.data(), num_edges * sizeof(int), hipMemcpyHostToDevice));
}
// Sort node_residuals using CUB device radix sort.
// Run sorting operation
hipcub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_node_residuals, d_node_residuals_out, d_node_ids, d_node_ids_out, num_nodes);
// Check largest residual. If it's less than epsilon, we know we've converged.
gpuErrchk(hipMemcpy(top_residual, d_node_residuals_out, sizeof(double), hipMemcpyDeviceToHost)); // Only copy back the first value.
converged = *top_residual < epsilon;
since = std::clock();
time = float(since - begin) / CLOCKS_PER_SEC;
}
gpuErrchk(hipEventRecord(stop));
gpuErrchk(hipEventSynchronize(stop));
float milliseconds = 0;
gpuErrchk(hipEventElapsedTime(&milliseconds, start,stop));
// Now the convergence should be complete, and we can launch a new kernel to determine the marginal distributions.
std::vector<double> result = compute_marginals(pgm, infer_data.first, infer_data.second, verbose);
if (verbose) {
print_doubles(result.data(), result.size());
std::cout << "Stopped after " << iterations << " iterations." << std::endl;
}
gpuErrchk(hipFree(d_temp_storage));
gpuErrchk(hipFree(d_node_residuals));
gpuErrchk(hipFree(d_node_residuals_out));
gpuErrchk(hipFree(d_node_ids));
gpuErrchk(hipFree(d_node_ids_out));
gpuErrchk(hipFree(d_edge_residuals));
gpuErrchk(hipFree(d_node_effected));
for (int frontier_id = 0; frontier_id < h; ++frontier_id) {
gpuErrchk(hipFree(d_frontiers[frontier_id]));
}
gpuErrchk(hipFree(d_edges_effected));
gpuErrchk(hipFree(d_edges_effected_workspace));
free(top_residual);
free_gpu_data(infer_data);
return std::tuple<float, std::vector<double>, int, std::vector<std::pair<int,int>>, std::vector<std::pair<float, int>>>(converged ? milliseconds : -1.0, result, converged ? iterations : -1, {}, {});
}
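#if 0 // Minimal usage sketch, not part of the original file; the pgm construction and
      // parameter values are assumptions, only the infer() signature above is taken as given.
int main() {
  pgm* model = /* build or load a factor graph elsewhere */ nullptr;
  double epsilon = 1e-5;           // convergence threshold on the largest node residual
  int timeout = 60;                // seconds
  std::vector<int> params = {2};   // parallelism divisor a, so p = 1/2^a = 0.25
  auto result = infer(model, epsilon, timeout, params, /*verbose=*/true);
  float ms = std::get<0>(result);  // -1.0 if the run timed out before converging
  std::vector<double> marginals = std::get<1>(result);
  int iterations = std::get<2>(result);
  return 0;
}
#endif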
|
cd48f9429ce9097535054ff4ca81b97f19a94c1a.cu
|
//
// Created by Mark Van der Merwe, Summer 2018
//
#include "infer.h"
#include "infer_data.h"
#include "inference_helpers.h"
#include "../header.h"
#include "math_functions.h"
#include <stdio.h>
#include <cub.cuh>
#include <iostream>
#include <cmath>
#include "cuda_profiler_api.h"
#include <ctime>
// edge_frontier - the next frontier of edges to update for the ith step.
// edges_effected - the total frontier of updated edges for this round.
__device__ void advance_edge_frontier(int* edge_frontier, int* edges_effected, int* nodes_effected, int* item_to_outgoing_idx, int* outgoing, int id, int* edge_idx_to_dest_node_idx, bool include_inverse) {
int start_outgoing_idx = item_to_outgoing_idx[id];
int end_outgoing_idx = item_to_outgoing_idx[id + 1];
for (int outgoing_idx = start_outgoing_idx; outgoing_idx < end_outgoing_idx; ++outgoing_idx) {
int edge_id = outgoing[outgoing_idx];
edge_frontier[edge_id] = 1;
// For edges effected, we want to also add in our inverse edge.
if (include_inverse) {
int inverse_edge_id = edge_id % 2 == 0 ? edge_id + 1 : edge_id - 1;
edges_effected[inverse_edge_id] = 1;
}
edges_effected[edge_id] = 1;
nodes_effected[edge_idx_to_dest_node_idx[edge_id]] = 1;
}
}
__global__ void rs_node_select(device_graph d_graph, device_pgm d_pgm, int* node_ids, int* nodes_effected, int* frontier, int* edges_effected, int node_count, int edge_count) {
// Determine the node this thread will be responsible for.
int node = (blockIdx.x * blockDim.x) + threadIdx.x;
if (node < node_count) {
// Map the node number to the actual node id.
int node_id = node_ids[node];
// Determine the edges outgoing from this node.
advance_edge_frontier(frontier, edges_effected, nodes_effected, d_graph.node_idx_to_outgoing_edges, d_graph.node_outgoing_edges, node_id, d_graph.edge_idx_to_dest_node_idx, true);
    // Mark this node as needing its residual updated.
nodes_effected[node_id] = 1;
}
}
__global__ void rs_generate_next_frontier(device_graph d_graph, device_pgm d_pgm, int* nodes_effected, int* last_frontier, int* frontier, int* edges_effected, int edge_count, bool include_inverse) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int step = gridDim.x * blockDim.x;
for (int edge_id = idx; edge_id < edge_count; edge_id += step) {
if (last_frontier[edge_id] == 1) {
// Extend the edges effected to include this one's neighbors.
advance_edge_frontier(frontier, edges_effected, nodes_effected, d_graph.edge_idx_to_outgoing_edges, d_graph.edge_outgoing_edges, edge_id, d_graph.edge_idx_to_dest_node_idx, include_inverse);
}
}
}
__global__ void rs_calculate_updates(device_graph d_graph, device_pgm d_pgm, int* frontier, int edge_count, bool forward) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int step = gridDim.x * blockDim.x;
for (int edge_id = idx; edge_id < edge_count; edge_id += step) {
if (frontier[edge_id] == 1) {
int actual_edge_id;
if (!forward) {
// If we are doing the message collection phase, we need to swap the edge id to be the opposite direction edge.
actual_edge_id = edge_id % 2 == 0 ? edge_id + 1 : edge_id - 1;
} else {
actual_edge_id = edge_id;
}
// Compute the edge.
compute_message(d_graph, d_pgm, actual_edge_id);
}
}
}
__global__ void rs_compute_edge_residuals(device_graph d_graph, device_pgm d_pgm, double* edge_residuals, int* edges_effected, int edge_count) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int step = gridDim.x * blockDim.x;
for (int edge_id = idx; edge_id < edge_count; edge_id += step) {
if (edges_effected[edge_id] == 1) {
// Compute the edge once more. TODO: Cache updates.
compute_message(d_graph, d_pgm, edge_id);
// Now compute the residual from this.
double edge_residual = message_delta(d_pgm.edges, d_pgm.workspace, d_pgm.edge_idx_to_edges_idx[edge_id]);
edge_residuals[edge_id] = edge_residual;
}
// Clear edges effected.
edges_effected[edge_id] = 0;
}
}
__global__ void rs_compute_node_residuals(device_graph d_graph, device_pgm d_pgm, double* node_residuals, double* edge_residuals, int* nodes_effected, int node_count) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int step = gridDim.x * blockDim.x;
for (int node_id = idx; node_id < node_count; node_id += step) {
if (nodes_effected[node_id] == 1) {
// Compute residual of this node.
// Residual is defined as the max of the residuals of the incoming messages.
double residual = 0.0;
int start_incoming_idx = d_graph.node_idx_to_incoming_edges[node_id];
int end_incoming_idx = d_graph.node_idx_to_incoming_edges[node_id + 1];
for (int incoming_idx = start_incoming_idx; incoming_idx < end_incoming_idx; ++incoming_idx) {
double edge_residual = edge_residuals[d_graph.node_incoming_edges[incoming_idx]];
if (edge_residual > residual)
residual = edge_residual;
}
node_residuals[node_id] = residual;
}
// Clear effected nodes.
nodes_effected[node_id] = 0;
}
}
std::tuple<float, std::vector<double>, int, std::vector<std::pair<int, int>>, std::vector<std::pair<float, int>>> infer(pgm* pgm, double epsilon, int timeout, std::vector<int> runtime_params, bool verbose) {
//
// Setup GPU data.
//
std::pair<device_graph, device_pgm> infer_data = setup_gpu_data(pgm);
int num_edges = pgm->num_edges();
int num_nodes = pgm->pgm_graph->node_idx_to_incoming_edges.size() - 1;
int edge_rep_size = pgm->edges.size();
if (verbose) {
std::cout << "Number of edges: " << num_edges << std::endl;
std::cout << "Number of nodes: " << num_nodes << std::endl;
}
// Size of each splash.
int h = 2;
// Create a residual array - each node gets a residual.
// At each round, to determine who to update, we perform a key-value sort and choose the top p keys.
double* d_node_residuals;
gpuErrchk(cudaMalloc((void**) &d_node_residuals, num_nodes * sizeof(double)));
std::vector<double> node_residuals_(num_nodes, 10.0); // Start all node_residuals as 10.0, so all nodes eventually update.
gpuErrchk(cudaMemcpy(d_node_residuals, node_residuals_.data(), num_nodes * sizeof(double), cudaMemcpyHostToDevice));
double* d_node_residuals_out;
gpuErrchk(cudaMalloc((void**) &d_node_residuals_out, num_nodes * sizeof(double)));
double* top_residual = (double*) malloc(sizeof(double));
std::vector<int> node_ids_;
for (int i = 0; i < num_nodes; ++i) {
node_ids_.push_back(i);
}
int* d_node_ids;
gpuErrchk(cudaMalloc((void**) &d_node_ids, num_nodes * sizeof(int)));
gpuErrchk(cudaMemcpy(d_node_ids, node_ids_.data(), num_nodes * sizeof(int), cudaMemcpyHostToDevice));
// We also need residuals for our edges, which are used to compute the node residuals.
double* d_edge_residuals;
gpuErrchk(cudaMalloc((void**) &d_edge_residuals, num_edges * sizeof(double)));
std::vector<double> edge_residuals_(num_edges, 10.0); // Start all edge_residuals as 10.0, so all edges eventually update.
gpuErrchk(cudaMemcpy(d_edge_residuals, edge_residuals_.data(), num_edges * sizeof(double), cudaMemcpyHostToDevice));
// Indicate whether a given node's residual should be updated after other nodes are updated.
int* d_node_effected;
gpuErrchk(cudaMalloc((void**) &d_node_effected, num_nodes * sizeof(int)));
std::vector<int> node_effected_(num_nodes, 0);
gpuErrchk(cudaMemcpy(d_node_effected, node_effected_.data(), num_nodes * sizeof(int), cudaMemcpyHostToDevice));
// TODO: The list of effected nodes to update residuals. Dense approach.
// Create h frontiers that will represent the splash.
std::vector<int> edge_effected_(num_edges, 0);
std::vector<int*> d_frontiers;
for (int frontier_id = 0; frontier_id < h; ++frontier_id) {
int* d_frontier;
gpuErrchk(cudaMalloc((void**) &d_frontier, num_edges * sizeof(int)));
gpuErrchk(cudaMemcpy(d_frontier, edge_effected_.data(), num_edges * sizeof(int), cudaMemcpyHostToDevice));
d_frontiers.push_back(d_frontier);
}
// We also want one final frontier that determines which edges need their residuals updated.
int* d_edges_effected;
gpuErrchk(cudaMalloc((void**) &d_edges_effected, num_edges * sizeof(int)));
gpuErrchk(cudaMemcpy(d_edges_effected, edge_effected_.data(), num_edges * sizeof(int), cudaMemcpyHostToDevice));
// We will also need a workspace for this to avoid a race condition.
int* d_edges_effected_workspace;
gpuErrchk(cudaMalloc((void**) &d_edges_effected_workspace, num_edges * sizeof(int)));
gpuErrchk(cudaMemcpy(d_edges_effected_workspace, edge_effected_.data(), num_edges * sizeof(int), cudaMemcpyHostToDevice));
//
// Setup GPU Runtime.
//
if (runtime_params.size() < 1) {
std::cout << "RS requires parallelism divisor a, where p = 1/2^a." << std::endl;
return std::tuple<float, std::vector<double>, int, std::vector<std::pair<int, int>>, std::vector<std::pair<float, int>>>(0.0,{},0,{},{});
}
float p = 1.0 / (float) std::pow(2, runtime_params[0]);
// Determine grid/block sizes using CUDA Occupancy calculators.
int minGridSize;
int blockSizeSelect;
int gridSizeSelect;
int nodes_to_update = (int) (num_nodes * p);
nodes_to_update = nodes_to_update == 0 ? 1 : nodes_to_update;
gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSizeSelect, rs_node_select, 0, 0));
scale_launch_params(&gridSizeSelect, &blockSizeSelect, nodes_to_update);
int blockSizeFrontier;
int gridSizeFrontier;
gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSizeFrontier, rs_generate_next_frontier, 0, 0));
scale_launch_params(&gridSizeFrontier, &blockSizeFrontier, num_edges);
int blockSizeCalcUpdates;
int gridSizeCalcUpdates;
gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSizeCalcUpdates, rs_calculate_updates, 0, 0));
scale_launch_params(&gridSizeCalcUpdates, &blockSizeCalcUpdates, num_edges);
int blockSizeEdgeResiduals;
int gridSizeEdgeResiduals;
gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSizeEdgeResiduals, rs_compute_edge_residuals, 0, 0));
scale_launch_params(&gridSizeEdgeResiduals, &blockSizeEdgeResiduals, num_edges);
int blockSizeNodeResiduals;
int gridSizeNodeResiduals;
gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSizeNodeResiduals, rs_compute_node_residuals, 0, 0));
scale_launch_params(&gridSizeNodeResiduals, &blockSizeNodeResiduals, num_nodes);
// We instantiate here because we want to know the number of nodes we will be updating.
std::vector<int> node_ids_out_;
for (int i = 0; i < num_nodes; i += ceiling_division(num_nodes, nodes_to_update)) {
node_ids_out_.push_back(i);
}
int i = 1;
while(node_ids_out_.size() < num_nodes) {
node_ids_out_.push_back(i);
++i;
if (i % ceiling_division(num_nodes, nodes_to_update) == 0)
++i;
}
int* d_node_ids_out;
gpuErrchk(cudaMalloc((void**) &d_node_ids_out, num_nodes * sizeof(int)));
gpuErrchk(cudaMemcpy(d_node_ids_out, node_ids_out_.data(), num_nodes * sizeof(int), cudaMemcpyHostToDevice));
// Determine temporary device storage requirements for sorting.
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_node_residuals, d_node_residuals_out, d_node_ids, d_node_ids_out, num_nodes);
// Allocate temporary storage
gpuErrchk(cudaMalloc(&d_temp_storage, temp_storage_bytes));
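  // CUB device-wide primitives use a two-phase calling convention: the first call with a
  // NULL d_temp_storage only writes the required byte count into temp_storage_bytes; after
  // allocating, the same call with the real pointer performs the sort. The buffer is then
  // reused for every sort in the loop below.
#if 0 // Stand-alone sketch of the pattern, not part of the original file; names are hypothetical.
void sort_descending(double* d_keys_in, double* d_keys_out,
                     int* d_vals_in, int* d_vals_out, int n) {
  void* d_temp = NULL;
  size_t temp_bytes = 0;
  cub::DeviceRadixSort::SortPairsDescending(d_temp, temp_bytes,
      d_keys_in, d_keys_out, d_vals_in, d_vals_out, n); // size query only
  cudaMalloc(&d_temp, temp_bytes);
  cub::DeviceRadixSort::SortPairsDescending(d_temp, temp_bytes,
      d_keys_in, d_keys_out, d_vals_in, d_vals_out, n); // actual sort
  cudaFree(d_temp);
}
#endif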
//
// Setup runtime tracking.
//
// Time our code.
std::clock_t begin = std::clock();
std::clock_t since;
float time = 0.0;
// Set up profiling events.
cudaEvent_t start, stop;
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
gpuErrchk(cudaEventRecord(start));
//
// Run.
//
int iterations = 0;
bool converged = false;
while(!converged && time < timeout) {
++iterations;
// Start by generating our frontiers for this run.
// First we select and mark the first frontier by selecting the top p nodes and adding their outgoing edges.
rs_node_select<<<gridSizeSelect, blockSizeSelect>>>(infer_data.first, infer_data.second, d_node_ids_out, d_node_effected, d_frontiers[0], d_edges_effected, num_nodes, num_edges);
// Now we generate the next h-1 frontiers.
for (int frontier_id = 1; frontier_id < h; ++frontier_id) {
rs_generate_next_frontier<<<gridSizeFrontier, blockSizeFrontier>>>(infer_data.first, infer_data.second, d_node_effected, d_frontiers[frontier_id - 1], d_frontiers[frontier_id], d_edges_effected, num_edges, true);
}
// Finally we need to extend our edges_effected by one more step.
// To do this we need to use our workspace to avoid a race condition.
// Start by copying the current edges effected into the workspace.
gpuErrchk(cudaMemcpy(d_edges_effected_workspace, d_edges_effected, num_edges * sizeof(int), cudaMemcpyDeviceToDevice));
    // This approach is a little hacky, but it avoids a lot of nearly duplicated code.
rs_generate_next_frontier<<<gridSizeFrontier, blockSizeFrontier>>>(infer_data.first, infer_data.second, d_node_effected, d_edges_effected_workspace, d_edges_effected, d_edges_effected, num_edges, false);
// We start with a backwards pass through our frontiers. This is the collection phase, where we make the root node aware of the leaves.
for (int update = h; update > 0; --update) {
      // Operate on the specified frontier.
int* d_frontier = d_frontiers[update - 1];
rs_calculate_updates<<<gridSizeCalcUpdates, blockSizeCalcUpdates>>>(infer_data.first, infer_data.second, d_frontier, num_edges, false);
// Set the edges equal to the workspace each time.
gpuErrchk(cudaMemcpy(infer_data.second.edges, infer_data.second.workspace, edge_rep_size * sizeof(double), cudaMemcpyDeviceToDevice));
}
// Now we do the forwards pass through our frontiers. This is the distribution phase, where we make the leaves aware of the root.
for (int update = 0; update < h; ++update) {
      // Operate on the specified frontier.
rs_calculate_updates<<<gridSizeCalcUpdates, blockSizeCalcUpdates>>>(infer_data.first, infer_data.second, d_frontiers[update], num_edges, true);
// Set the edges equal to the workspace each time.
gpuErrchk(cudaMemcpy(infer_data.second.edges, infer_data.second.workspace, edge_rep_size * sizeof(double), cudaMemcpyDeviceToDevice));
}
// Compute the updated edge residuals.
rs_compute_edge_residuals<<<gridSizeEdgeResiduals, blockSizeEdgeResiduals>>>(infer_data.first, infer_data.second, d_edge_residuals, d_edges_effected, num_edges);
gpuErrchk(cudaMemcpy(infer_data.second.workspace, infer_data.second.edges, edge_rep_size * sizeof(double), cudaMemcpyDeviceToDevice));
// Finally, compute the updated node residuals.
rs_compute_node_residuals<<<gridSizeNodeResiduals, blockSizeNodeResiduals>>>(infer_data.first, infer_data.second, d_node_residuals, d_edge_residuals, d_node_effected, num_nodes);
// Clear frontiers. Edges effected and nodes effected are already cleared.
for (int frontier_id = 0; frontier_id < h; ++frontier_id) {
gpuErrchk(cudaMemcpy(d_frontiers[frontier_id], edge_effected_.data(), num_edges * sizeof(int), cudaMemcpyHostToDevice));
}
// Sort node_residuals using CUB device radix sort.
// Run sorting operation
cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_node_residuals, d_node_residuals_out, d_node_ids, d_node_ids_out, num_nodes);
// Check largest residual. If it's less than epsilon, we know we've converged.
gpuErrchk(cudaMemcpy(top_residual, d_node_residuals_out, sizeof(double), cudaMemcpyDeviceToHost)); // Only copy back the first value.
converged = *top_residual < epsilon;
since = std::clock();
time = float(since - begin) / CLOCKS_PER_SEC;
}
gpuErrchk(cudaEventRecord(stop));
gpuErrchk(cudaEventSynchronize(stop));
float milliseconds = 0;
gpuErrchk(cudaEventElapsedTime(&milliseconds, start,stop));
// Now the convergence should be complete, and we can launch a new kernel to determine the marginal distributions.
std::vector<double> result = compute_marginals(pgm, infer_data.first, infer_data.second, verbose);
if (verbose) {
print_doubles(result.data(), result.size());
std::cout << "Stopped after " << iterations << " iterations." << std::endl;
}
gpuErrchk(cudaFree(d_temp_storage));
gpuErrchk(cudaFree(d_node_residuals));
gpuErrchk(cudaFree(d_node_residuals_out));
gpuErrchk(cudaFree(d_node_ids));
gpuErrchk(cudaFree(d_node_ids_out));
gpuErrchk(cudaFree(d_edge_residuals));
gpuErrchk(cudaFree(d_node_effected));
for (int frontier_id = 0; frontier_id < h; ++frontier_id) {
gpuErrchk(cudaFree(d_frontiers[frontier_id]));
}
gpuErrchk(cudaFree(d_edges_effected));
gpuErrchk(cudaFree(d_edges_effected_workspace));
free(top_residual);
free_gpu_data(infer_data);
return std::tuple<float, std::vector<double>, int, std::vector<std::pair<int,int>>, std::vector<std::pair<float, int>>>(converged ? milliseconds : -1.0, result, converged ? iterations : -1, {}, {});
}
|
50174fae9abe4d601fbe2cb89e60ea6ce219b930.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
extern "C" void forward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
int i;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int m = layer.size*layer.size*layer.n;
int n = layer.h*layer.w;
int k = layer.c;
fill_ongpu(layer.outputs*layer.batch, 0, layer.output_gpu, 1);
for(i = 0; i < layer.batch; ++i){
float *a = layer.weights_gpu;
float *b = state.input + i*layer.c*layer.h*layer.w;
float *c = layer.col_image_gpu;
gemm_ongpu(1,0,m,n,k,1,a,m,b,n,0,c,n);
col2im_ongpu(c, layer.n, out_h, out_w, layer.size, layer.stride, 0, layer.output_gpu+i*layer.n*size);
}
add_bias_gpu(layer.output_gpu, layer.biases_gpu, layer.batch, layer.n, size);
activate_array(layer.output_gpu, layer.batch*layer.n*size, layer.activation);
}
extern "C" void backward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
float alpha = 1./layer.batch;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int i;
gradient_array(layer.output_gpu, size*layer.n*layer.batch, layer.activation, layer.delta_gpu);
backward_bias(layer.bias_updates_gpu, layer.delta, layer.batch, layer.n, size);
if(state.delta) memset(state.delta, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
for(i = 0; i < layer.batch; ++i){
int m = layer.c;
int n = layer.size*layer.size*layer.n;
int k = layer.h*layer.w;
float *a = state.input + i*m*n;
float *b = layer.col_image_gpu;
float *c = layer.weight_updates_gpu;
im2col_ongpu(layer.delta_gpu + i*layer.n*size, layer.n, out_h, out_w,
layer.size, layer.stride, 0, b);
gemm_ongpu(0,1,m,n,k,alpha,a,k,b,k,1,c,n);
if(state.delta){
int m = layer.c;
int n = layer.h*layer.w;
int k = layer.size*layer.size*layer.n;
float *a = layer.weights_gpu;
float *b = layer.col_image_gpu;
float *c = state.delta + i*n*m;
gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
}
}
extern "C" void pull_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void push_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void update_deconvolutional_layer_gpu(deconvolutional_layer layer, int skip, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
axpy_ongpu(size, -decay, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
|
50174fae9abe4d601fbe2cb89e60ea6ce219b930.cu
|
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
extern "C" void forward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
int i;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int m = layer.size*layer.size*layer.n;
int n = layer.h*layer.w;
int k = layer.c;
fill_ongpu(layer.outputs*layer.batch, 0, layer.output_gpu, 1);
for(i = 0; i < layer.batch; ++i){
float *a = layer.weights_gpu;
float *b = state.input + i*layer.c*layer.h*layer.w;
float *c = layer.col_image_gpu;
gemm_ongpu(1,0,m,n,k,1,a,m,b,n,0,c,n);
col2im_ongpu(c, layer.n, out_h, out_w, layer.size, layer.stride, 0, layer.output_gpu+i*layer.n*size);
}
add_bias_gpu(layer.output_gpu, layer.biases_gpu, layer.batch, layer.n, size);
activate_array(layer.output_gpu, layer.batch*layer.n*size, layer.activation);
}
extern "C" void backward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
float alpha = 1./layer.batch;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int i;
gradient_array(layer.output_gpu, size*layer.n*layer.batch, layer.activation, layer.delta_gpu);
backward_bias(layer.bias_updates_gpu, layer.delta, layer.batch, layer.n, size);
if(state.delta) memset(state.delta, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
for(i = 0; i < layer.batch; ++i){
int m = layer.c;
int n = layer.size*layer.size*layer.n;
int k = layer.h*layer.w;
float *a = state.input + i*m*n;
float *b = layer.col_image_gpu;
float *c = layer.weight_updates_gpu;
im2col_ongpu(layer.delta_gpu + i*layer.n*size, layer.n, out_h, out_w,
layer.size, layer.stride, 0, b);
gemm_ongpu(0,1,m,n,k,alpha,a,k,b,k,1,c,n);
if(state.delta){
int m = layer.c;
int n = layer.h*layer.w;
int k = layer.size*layer.size*layer.n;
float *a = layer.weights_gpu;
float *b = layer.col_image_gpu;
float *c = state.delta + i*n*m;
gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
}
}
extern "C" void pull_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void push_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void update_deconvolutional_layer_gpu(deconvolutional_layer layer, int skip, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
axpy_ongpu(size, -decay, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
|
52e369f97e97bb1cbdd0aceafb2e030f04d89b3c.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h"
#include "paddle/phi/kernels/funcs/pooling.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
nvinfer1::Dims PoolPlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputDims,
int nbInputs) TRT_NOEXCEPT {
assert(nbInputs == 1);
assert(index == 0);
assert(inputDims[0].nbDims == 3);
nvinfer1::Dims const &input_dims = inputDims[0];
nvinfer1::Dims output_dims = input_dims;
output_dims.d[1] = output_shape_[1];
output_dims.d[2] = output_shape_[2];
return output_dims;
}
size_t PoolPlugin::getSerializationSize() const TRT_NOEXCEPT {
return getBaseSerializationSize() + SerializedSize(ceil_mode_) +
SerializedSize(pool_type_) + SerializedSize(adaptive_) +
SerializedSize(exclusive_) + SerializedSize(ksize_) +
SerializedSize(strides_) + SerializedSize(paddings_) +
SerializedSize(real_paddings_) + SerializedSize(input_shape_) +
SerializedSize(output_shape_);
}
// TensorRT will call this function when it needs to serialize the plugin
// configuration.
void PoolPlugin::serialize(void *buffer) const TRT_NOEXCEPT {
serializeBase(buffer);
SerializeValue(&buffer, ceil_mode_);
SerializeValue(&buffer, pool_type_);
SerializeValue(&buffer, adaptive_);
SerializeValue(&buffer, exclusive_);
SerializeValue(&buffer, ksize_);
SerializeValue(&buffer, strides_);
SerializeValue(&buffer, paddings_);
SerializeValue(&buffer, real_paddings_);
SerializeValue(&buffer, input_shape_);
SerializeValue(&buffer, output_shape_);
}
PoolPlugin *PoolPlugin::clone() const TRT_NOEXCEPT {
return new PoolPlugin(ceil_mode_,
pool_type_,
adaptive_,
exclusive_,
ksize_,
strides_,
paddings_,
input_shape_,
real_paddings_);
}
int PoolPlugin::enqueue(int batchSize,
const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
#else
void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
#endif
auto const &input_dims = this->getInputDims(0);
int input_size = 0;
float const *idata = reinterpret_cast<float const *>(inputs[0]);
float *const *odatas = reinterpret_cast<float *const *>(outputs);
std::vector<int> input_shape = input_shape_;
std::vector<int> output_shape = output_shape_;
input_shape.insert(input_shape.begin(), batchSize);
output_shape.insert(output_shape.begin(), batchSize);
if (pool_type_ == PoolType::max) {
phi::funcs::MaxPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
pool2d_forward;
pool2d_forward(idata,
input_shape,
output_shape,
ksize_,
strides_,
paddings_,
true,
false,
odatas[0],
stream,
pool_process);
} else if (pool_type_ == PoolType::avg) {
phi::funcs::AvgPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
pool2d_forward;
pool2d_forward(idata,
input_shape,
output_shape,
ksize_,
strides_,
paddings_,
exclusive_,
adaptive_,
odatas[0],
stream,
pool_process);
}
return hipGetLastError() != hipSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
PoolPluginDynamic::PoolPluginDynamic(void const *serialData,
size_t serialLength) {
DeserializeValue(&serialData, &serialLength, &ceil_mode_);
const char *pool_type;
DeserializeValue(&serialData, &serialLength, &pool_type);
pool_type_ = std::string(pool_type);
DeserializeValue(&serialData, &serialLength, &adaptive_);
DeserializeValue(&serialData, &serialLength, &exclusive_);
DeserializeValue(&serialData, &serialLength, &ksize_);
DeserializeValue(&serialData, &serialLength, &strides_);
DeserializeValue(&serialData, &serialLength, &paddings_);
DeserializeValue(&serialData, &serialLength, &is_global_);
}
size_t PoolPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(ceil_mode_) + SerializedSize(pool_type_.c_str()) +
SerializedSize(adaptive_) + SerializedSize(exclusive_) +
SerializedSize(ksize_) + SerializedSize(strides_) +
SerializedSize(paddings_) + SerializedSize(is_global_);
}
void PoolPluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, ceil_mode_);
SerializeValue(&buffer, pool_type_.c_str());
SerializeValue(&buffer, adaptive_);
SerializeValue(&buffer, exclusive_);
SerializeValue(&buffer, ksize_);
SerializeValue(&buffer, strides_);
SerializeValue(&buffer, paddings_);
SerializeValue(&buffer, is_global_);
}
nvinfer1::IPluginV2DynamicExt *PoolPluginDynamic::clone() const TRT_NOEXCEPT {
return new PoolPluginDynamic(ceil_mode_,
pool_type_,
adaptive_,
exclusive_,
ksize_,
strides_,
paddings_,
is_global_);
}
nvinfer1::DimsExprs PoolPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputs,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(nb_inputs,
1,
platform::errors::InvalidArgument(
"The Split plugin should be only one input."));
nvinfer1::DimsExprs output(inputs[0]);
if (is_global_ && !adaptive_) {
output.d[2] = expr_builder.constant(1);
output.d[3] = expr_builder.constant(1);
return output;
}
if (is_global_ && adaptive_) {
return inputs[0];
}
if (adaptive_) {
output.d[2] = expr_builder.constant(ksize_[0]);
output.d[3] = expr_builder.constant(ksize_[1]);
return output;
}
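  // The expressions below build the standard pooling output size,
  //   H_out = floor((H_in + 2*pad - ksize) / stride) + 1,
  // with ceil_mode adding (stride - 1) to the numerator so the division rounds up.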
auto stri_0 = expr_builder.constant(strides_[0]);
auto stri_1 = expr_builder.constant(strides_[1]);
auto one_value = expr_builder.constant(1);
auto v0_tmp = expr_builder.constant(-ksize_[0] + 2 * paddings_[0]);
auto v1_tmp = expr_builder.constant(-ksize_[1] + 2 * paddings_[1]);
auto ceil_tmp =
expr_builder.constant(-ksize_[0] + 2 * paddings_[0] + strides_[0] - 1);
auto ceil1_tmp =
expr_builder.constant(-ksize_[1] + 2 * paddings_[1] + strides_[1] - 1);
if (!ceil_mode_) {
output.d[2] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(
nvinfer1::DimensionOperation::kSUM, *inputs[0].d[2], *v0_tmp),
*stri_0),
*one_value);
output.d[3] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(
nvinfer1::DimensionOperation::kSUM, *inputs[0].d[3], *v1_tmp),
*stri_1),
*one_value);
} else {
output.d[2] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(
nvinfer1::DimensionOperation::kSUM, *inputs[0].d[2], *ceil_tmp),
*stri_0),
*one_value);
output.d[3] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(nvinfer1::DimensionOperation::kSUM,
*inputs[0].d[3],
*ceil1_tmp),
*stri_1),
*one_value);
}
return output;
}
bool PoolPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
return ((in_out[pos].type == nvinfer1::DataType::kFLOAT) &&
in_out[pos].format == nvinfer1::PluginFormat::kLINEAR);
}
nvinfer1::DataType PoolPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The Pool Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ(
(input_types[0] == nvinfer1::DataType::kFLOAT),
true,
platform::errors::InvalidArgument("The input type should be float"));
return input_types[0];
}
int PoolPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
int h = input_dims.d[2];
int w = input_dims.d[3];
const float *input = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
std::vector<int> input_shape, output_shape;
for (int i = 0; i < input_dims.nbDims; i++)
input_shape.push_back(input_dims.d[i]);
output_shape = input_shape;
std::vector<int> ksize = ksize_;
std::vector<int> paddings = paddings_;
if (is_global_) {
ksize[0] = h;
ksize[1] = w;
paddings[0] = 0;
paddings[1] = 0;
output_shape[2] = 1;
output_shape[3] = 1;
if (adaptive_) {
output_shape[2] = h;
output_shape[3] = w;
}
} else {
auto data_dim = CalcOutputSize(
{h, w}, ceil_mode_, adaptive_, ksize_, strides_, paddings_);
output_shape[2] = data_dim[0];
output_shape[3] = data_dim[1];
}
if (pool_type_ == "max") {
phi::funcs::MaxPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
pool2d_forward;
pool2d_forward(input,
input_shape,
output_shape,
ksize,
strides_,
paddings,
true,
false,
output,
stream,
pool_process);
} else if (pool_type_ == "avg") {
phi::funcs::AvgPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
pool2d_forward;
pool2d_forward(input,
input_shape,
output_shape,
ksize,
strides_,
paddings,
exclusive_,
adaptive_,
output,
stream,
pool_process);
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
52e369f97e97bb1cbdd0aceafb2e030f04d89b3c.cu
|
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h"
#include "paddle/phi/kernels/funcs/pooling.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
nvinfer1::Dims PoolPlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputDims,
int nbInputs) TRT_NOEXCEPT {
assert(nbInputs == 1);
assert(index == 0);
assert(inputDims[0].nbDims == 3);
nvinfer1::Dims const &input_dims = inputDims[0];
nvinfer1::Dims output_dims = input_dims;
output_dims.d[1] = output_shape_[1];
output_dims.d[2] = output_shape_[2];
return output_dims;
}
size_t PoolPlugin::getSerializationSize() const TRT_NOEXCEPT {
return getBaseSerializationSize() + SerializedSize(ceil_mode_) +
SerializedSize(pool_type_) + SerializedSize(adaptive_) +
SerializedSize(exclusive_) + SerializedSize(ksize_) +
SerializedSize(strides_) + SerializedSize(paddings_) +
SerializedSize(real_paddings_) + SerializedSize(input_shape_) +
SerializedSize(output_shape_);
}
// TensorRT will call this function when it needs to serialize the plugin
// configuration.
void PoolPlugin::serialize(void *buffer) const TRT_NOEXCEPT {
serializeBase(buffer);
SerializeValue(&buffer, ceil_mode_);
SerializeValue(&buffer, pool_type_);
SerializeValue(&buffer, adaptive_);
SerializeValue(&buffer, exclusive_);
SerializeValue(&buffer, ksize_);
SerializeValue(&buffer, strides_);
SerializeValue(&buffer, paddings_);
SerializeValue(&buffer, real_paddings_);
SerializeValue(&buffer, input_shape_);
SerializeValue(&buffer, output_shape_);
}
PoolPlugin *PoolPlugin::clone() const TRT_NOEXCEPT {
return new PoolPlugin(ceil_mode_,
pool_type_,
adaptive_,
exclusive_,
ksize_,
strides_,
paddings_,
input_shape_,
real_paddings_);
}
int PoolPlugin::enqueue(int batchSize,
const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
#else
void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
#endif
auto const &input_dims = this->getInputDims(0);
int input_size = 0;
float const *idata = reinterpret_cast<float const *>(inputs[0]);
float *const *odatas = reinterpret_cast<float *const *>(outputs);
std::vector<int> input_shape = input_shape_;
std::vector<int> output_shape = output_shape_;
input_shape.insert(input_shape.begin(), batchSize);
output_shape.insert(output_shape.begin(), batchSize);
if (pool_type_ == PoolType::max) {
phi::funcs::MaxPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
pool2d_forward;
pool2d_forward(idata,
input_shape,
output_shape,
ksize_,
strides_,
paddings_,
true,
false,
odatas[0],
stream,
pool_process);
} else if (pool_type_ == PoolType::avg) {
phi::funcs::AvgPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
pool2d_forward;
pool2d_forward(idata,
input_shape,
output_shape,
ksize_,
strides_,
paddings_,
exclusive_,
adaptive_,
odatas[0],
stream,
pool_process);
}
return cudaGetLastError() != cudaSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
PoolPluginDynamic::PoolPluginDynamic(void const *serialData,
size_t serialLength) {
DeserializeValue(&serialData, &serialLength, &ceil_mode_);
const char *pool_type;
DeserializeValue(&serialData, &serialLength, &pool_type);
pool_type_ = std::string(pool_type);
DeserializeValue(&serialData, &serialLength, &adaptive_);
DeserializeValue(&serialData, &serialLength, &exclusive_);
DeserializeValue(&serialData, &serialLength, &ksize_);
DeserializeValue(&serialData, &serialLength, &strides_);
DeserializeValue(&serialData, &serialLength, &paddings_);
DeserializeValue(&serialData, &serialLength, &is_global_);
}
size_t PoolPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(ceil_mode_) + SerializedSize(pool_type_.c_str()) +
SerializedSize(adaptive_) + SerializedSize(exclusive_) +
SerializedSize(ksize_) + SerializedSize(strides_) +
SerializedSize(paddings_) + SerializedSize(is_global_);
}
void PoolPluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, ceil_mode_);
SerializeValue(&buffer, pool_type_.c_str());
SerializeValue(&buffer, adaptive_);
SerializeValue(&buffer, exclusive_);
SerializeValue(&buffer, ksize_);
SerializeValue(&buffer, strides_);
SerializeValue(&buffer, paddings_);
SerializeValue(&buffer, is_global_);
}
nvinfer1::IPluginV2DynamicExt *PoolPluginDynamic::clone() const TRT_NOEXCEPT {
return new PoolPluginDynamic(ceil_mode_,
pool_type_,
adaptive_,
exclusive_,
ksize_,
strides_,
paddings_,
is_global_);
}
nvinfer1::DimsExprs PoolPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputs,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(nb_inputs,
1,
platform::errors::InvalidArgument(
"The Split plugin should be only one input."));
nvinfer1::DimsExprs output(inputs[0]);
if (is_global_ && !adaptive_) {
output.d[2] = expr_builder.constant(1);
output.d[3] = expr_builder.constant(1);
return output;
}
if (is_global_ && adaptive_) {
return inputs[0];
}
if (adaptive_) {
output.d[2] = expr_builder.constant(ksize_[0]);
output.d[3] = expr_builder.constant(ksize_[1]);
return output;
}
auto stri_0 = expr_builder.constant(strides_[0]);
auto stri_1 = expr_builder.constant(strides_[1]);
auto one_value = expr_builder.constant(1);
auto v0_tmp = expr_builder.constant(-ksize_[0] + 2 * paddings_[0]);
auto v1_tmp = expr_builder.constant(-ksize_[1] + 2 * paddings_[1]);
auto ceil_tmp =
expr_builder.constant(-ksize_[0] + 2 * paddings_[0] + strides_[0] - 1);
auto ceil1_tmp =
expr_builder.constant(-ksize_[1] + 2 * paddings_[1] + strides_[1] - 1);
if (!ceil_mode_) {
output.d[2] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(
nvinfer1::DimensionOperation::kSUM, *inputs[0].d[2], *v0_tmp),
*stri_0),
*one_value);
output.d[3] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(
nvinfer1::DimensionOperation::kSUM, *inputs[0].d[3], *v1_tmp),
*stri_1),
*one_value);
} else {
output.d[2] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(
nvinfer1::DimensionOperation::kSUM, *inputs[0].d[2], *ceil_tmp),
*stri_0),
*one_value);
output.d[3] = expr_builder.operation(
nvinfer1::DimensionOperation::kSUM,
*expr_builder.operation(
nvinfer1::DimensionOperation::kFLOOR_DIV,
*expr_builder.operation(nvinfer1::DimensionOperation::kSUM,
*inputs[0].d[3],
*ceil1_tmp),
*stri_1),
*one_value);
}
return output;
}
bool PoolPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
return ((in_out[pos].type == nvinfer1::DataType::kFLOAT) &&
in_out[pos].format == nvinfer1::PluginFormat::kLINEAR);
}
nvinfer1::DataType PoolPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The Pool Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ(
(input_types[0] == nvinfer1::DataType::kFLOAT),
true,
platform::errors::InvalidArgument("The input type should be float"));
return input_types[0];
}
int PoolPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
int n = input_dims.d[0];
int c = input_dims.d[1];
int h = input_dims.d[2];
int w = input_dims.d[3];
const float *input = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
std::vector<int> input_shape, output_shape;
for (int i = 0; i < input_dims.nbDims; i++)
input_shape.push_back(input_dims.d[i]);
output_shape = input_shape;
std::vector<int> ksize = ksize_;
std::vector<int> paddings = paddings_;
if (is_global_) {
ksize[0] = h;
ksize[1] = w;
paddings[0] = 0;
paddings[1] = 0;
output_shape[2] = 1;
output_shape[3] = 1;
if (adaptive_) {
output_shape[2] = h;
output_shape[3] = w;
}
} else {
auto data_dim = CalcOutputSize(
{h, w}, ceil_mode_, adaptive_, ksize_, strides_, paddings_);
output_shape[2] = data_dim[0];
output_shape[3] = data_dim[1];
}
if (pool_type_ == "max") {
phi::funcs::MaxPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
pool2d_forward;
pool2d_forward(input,
input_shape,
output_shape,
ksize,
strides_,
paddings,
true,
false,
output,
stream,
pool_process);
} else if (pool_type_ == "avg") {
phi::funcs::AvgPool<float> pool_process;
phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
pool2d_forward;
pool2d_forward(input,
input_shape,
output_shape,
ksize,
strides_,
paddings,
exclusive_,
adaptive_,
output,
stream,
pool_process);
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
451fcda45dc74844b1c2c472b119e22c836bc94c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fastertransformer/common.h"
#include "cuda_kernels.h"
#include "cuda_int8_kernels.h"
#include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <climits>
#include <cfloat>
namespace fastertransformer{
template <typename T>
__inline__ __device__
T gelu(T x)
{
float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x))));
return x * cdf;
}
template <>
__inline__ __device__
half2 gelu(half2 val)
{
half2 val_pow3 = __hmul2(val, __hmul2(val, val));
float2 tmp_pow = __half22float2(val_pow3);
float2 tmp = __half22float2(val);
tmp.x = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x))));
tmp.y = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y))));
return __hmul2(val, __float22half2_rn(tmp));
}
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
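// Two-stage reduction: each warp reduces its 32 lanes, lane 0 stores the partial
// sum in shared memory, then the first warp reduces the per-warp partials.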
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)0.0f;
val = warpReduceSum(val);
return val;
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get max in each warp
if(lane == 0) // record in-warp max by warp idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f;
val = warpReduceMax<T>(val);
return val;
}
//transpose matrix
//for (m n) col-major
//grid((m+31)/32, (n+31)/32)
//block(32, 32)
template<typename T>
__global__
void transposeMatrix_kernel(T*dst, const T* src, const int m, const int n)
{
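// The +1 column of padding keeps the transposed reads free of shared-memory bank conflicts.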
__shared__ T tile[COL32_][COL32_+1];
int blockx32 = blockIdx.x * COL32_;
int blocky32 = blockIdx.y * COL32_;
int x = blockx32 + threadIdx.x;
int y = blocky32 + threadIdx.y;
bool check = ((x < m) && (y < n));
tile[threadIdx.y][threadIdx.x] = check ? __ldg(src+y*m+x) : T(0);
__syncthreads();
y = blockx32 + threadIdx.y;
x = blocky32 + threadIdx.x;
check = ((x < n) && (y < m));
if (check)
dst[y*n+x] = tile[threadIdx.x][threadIdx.y];
}
//for (m, n) col-major matrix
template <typename T>
void transposeMatrix_kernelLauncher(T* dst, const T* src, const int m, const int n, hipStream_t stream)
{
hipLaunchKernelGGL(( transposeMatrix_kernel<T>), dim3(dim3((m+31)/32, (n+31)/32)), dim3(dim3(32, 32)), 0, stream, dst, src, m, n);
}
template void transposeMatrix_kernelLauncher<float>(float* dst, const float* src, const int m, const int n, hipStream_t stream);
template void transposeMatrix_kernelLauncher<half>(half *dst, const half* src, const int m, const int n, hipStream_t stream);
template void transposeMatrix_kernelLauncher<int8_t>(int8_t* dst, const int8_t* src, const int m, const int n, hipStream_t stream);
template void transposeMatrix_kernelLauncher<int>(int* dst, const int* src, const int m, const int n, hipStream_t stream);
//add bias to matrix of m * n, CUBLASLT_ORDER_COL32
//grid, thread = (m), (n/4)
//using char4
//for per-axis-quantization weight
template <typename T>
__global__
void add_bias_act_COL32_int32I_int8O(int8_t *out, const int32_t* input, const T* bias, const int m, const int n,
const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
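// Per element: dequantize the INT32 GEMM result with the per-channel weight amax,
// add the bias, apply GELU, then requantize with out_scale; four adjacent columns
// are packed into a single char4 store.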
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
int col_start = threadIdx.x << 2;
char4 *outTmpPtr = (char4 *)out;
char4 tmp;
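// COL32 offset of element (row = blockIdx.x, col = col_start): (col / 32) * 32 * m + row * 32 + (col % 32).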
int inIdx = (col_start & 0xffffffe0) * m + (blockIdx.x << 5) + (col_start&31);
int outIdx = inIdx >> 2;
float val;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start));
val = gelu(val);
tmp.x = float_to_int8_rn(val*out_scale);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start));
val = gelu(val);
tmp.y = float_to_int8_rn(val*out_scale);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start));
val = gelu(val);
tmp.z = float_to_int8_rn(val*out_scale);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start));
val = gelu(val);
tmp.w = float_to_int8_rn(val*out_scale);
outTmpPtr[outIdx] = tmp;
}
template <>
__global__
void add_bias_act_COL32_int32I_int8O(int8_t *out, const int32_t* input, const half2* bias, const int m, const int n,
const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
int col_start = threadIdx.x << 2;
int threadIdx2 = threadIdx.x << 1;
char4 *outTmpPtr = (char4 *)out;
char4 tmp;
int inIdx = (col_start & 0xffffffe0) * m + (blockIdx.x << 5) + (col_start&31);
int outIdx = inIdx >> 2;
float val;
half2 biasTmp = __ldg(bias+threadIdx2);
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(biasTmp.x);
val = gelu(val);
tmp.x = float_to_int8_rn(out_scale * val);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(biasTmp.y);
val = gelu(val);
tmp.y = float_to_int8_rn(out_scale * val);
biasTmp = __ldg(bias+threadIdx2+1);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(biasTmp.x);
val = gelu(val);
tmp.z = float_to_int8_rn(out_scale * val);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(biasTmp.y);
val = gelu(val);
tmp.w = float_to_int8_rn(out_scale * val);
outTmpPtr[outIdx] = tmp;
}
template <typename T>
void add_bias_act_COL32_int32I_int8O_kernelLauncher(int8_t *out, const int32_t* input, const T* bias, const int m, const int n,
hipStream_t stream, const float* weight_amax, const float* input_deQFactor_div127_ptr, const float* out_scale_ptr){
dim3 grid(m);
dim3 block(n/4);
assert(block.x <= 1024);
if (sizeof(T) == sizeof(half))
hipLaunchKernelGGL(( add_bias_act_COL32_int32I_int8O), dim3(grid), dim3(block), 0, stream, out, input, (const half2*)bias, m, n, weight_amax, input_deQFactor_div127_ptr, out_scale_ptr);
else
hipLaunchKernelGGL(( add_bias_act_COL32_int32I_int8O<T>), dim3(grid), dim3(block), 0, stream, out, input, bias, m, n, weight_amax, input_deQFactor_div127_ptr, out_scale_ptr);
}
template void add_bias_act_COL32_int32I_int8O_kernelLauncher<float>(int8_t *out, const int32_t* input, const float* bias, const int m, const int n, hipStream_t stream, const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr);
template void add_bias_act_COL32_int32I_int8O_kernelLauncher<half>(int8_t *out, const int32_t* input, const half* bias, const int m, const int n, hipStream_t stream, const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr);
//input1/input2/out matrix with layout of cublasLt CUBLASLT_ORDER_COL32 (m*n)
//(grid, block) must be (m, n/4)
//using char4
template <typename T>
__global__
void add_bias_input_layernorm_COL32_mixIntI_int8O(int8_t* output, const int32_t* input1, const int8_t* input2, const T* bias, const T* gamma,
const T* beta, int m, int n, const float* weight_amax, const float *input1_deQFactor_div127_ptr,
const float *input2_deQFactor_ptr, const float *output_scale_ptr)
{
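// Fused residual add + LayerNorm: dequantize the INT32 GEMM output and the INT8 residual,
// add the bias, normalize over the n columns of this block's row, then requantize to INT8.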
const float input1_deQFactor_div127 = __ldg(input1_deQFactor_div127_ptr);
const float input2_deQFactor = __ldg(input2_deQFactor_ptr);
const float output_scale = __ldg(output_scale_ptr);
int col_start = threadIdx.x << 2;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out[4];
int input1Idx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
int outIdx = input1Idx >> 2;
char4 *outTmpPtr = (char4*)output;
char4 *input2TmpPtr = (char4*)input2;
char4 input2Tmp = __ldg(input2TmpPtr+outIdx);
int col_start_tmp = col_start;
local_out[0] = static_cast<float>(input2Tmp.x)*input2_deQFactor + static_cast<float>(__ldg(input1+input1Idx))*__ldg(weight_amax+col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start_tmp));
col_start_tmp = col_start_tmp + 1;
local_out[1] = static_cast<float>(input2Tmp.y)*input2_deQFactor + static_cast<float>(__ldg(input1+input1Idx+1))*__ldg(weight_amax+col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start_tmp));
col_start_tmp = col_start_tmp + 1;
local_out[2] = static_cast<float>(input2Tmp.z)*input2_deQFactor + static_cast<float>(__ldg(input1+input1Idx+2))*__ldg(weight_amax+col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start_tmp));
col_start_tmp = col_start_tmp + 1;
local_out[3] = static_cast<float>(input2Tmp.w)*input2_deQFactor + static_cast<float>(__ldg(input1+input1Idx+3))*__ldg(weight_amax+col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start_tmp));
mean = blockReduceSum<float>(local_out[0] + local_out[1] + local_out[2] + local_out[3]);
if(threadIdx.x == 0)
s_mean = mean * __fdividef(1.0f, n);
__syncthreads();
local_out[0] = local_out[0] - s_mean;
local_out[1] = local_out[1] - s_mean;
local_out[2] = local_out[2] - s_mean;
local_out[3] = local_out[3] - s_mean;
variance = blockReduceSum<float>(local_out[0] * local_out[0] +
local_out[1] * local_out[1] +
local_out[2] * local_out[2] +
local_out[3] * local_out[3]
);
if(threadIdx.x == 0){
s_variance = variance * __fdividef(1.0f, n) + 1e-6f;
s_variance = rsqrtf(s_variance);
}
__syncthreads();
local_out[0] = (local_out[0] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start));
input2Tmp.x = float_to_int8_rn(local_out[0] * output_scale);
col_start = col_start+1;
local_out[1] = (local_out[1] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start));
input2Tmp.y = float_to_int8_rn(local_out[1] * output_scale);
col_start = col_start+1;
local_out[2] = (local_out[2] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start));
input2Tmp.z = float_to_int8_rn(local_out[2] * output_scale);
col_start = col_start+1;
local_out[3] = (local_out[3] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start));
input2Tmp.w = float_to_int8_rn(local_out[3] * output_scale);
outTmpPtr[outIdx] = input2Tmp;
}
template <>
__global__
void add_bias_input_layernorm_COL32_mixIntI_int8O(int8_t* output, const int32_t* input1, const int8_t* input2, const half2* bias, const half2* gamma,
const half2* beta, int m, int n, const float* weight_amax, const float *input1_deQFactor_div127_ptr,
const float *input2_deQFactor_ptr, const float *output_scale_ptr)
{
const float input1_deQFactor_div127 = __ldg(input1_deQFactor_div127_ptr);
const float input2_deQFactor = __ldg(input2_deQFactor_ptr);
const float output_scale = __ldg(output_scale_ptr);
int col_start = threadIdx.x << 2;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out[4];
int input1Idx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
int outIdx = input1Idx >> 2;
char4 *outTmpPtr = (char4*)output;
char4 *input2TmpPtr = (char4*)input2;
char4 input2Tmp = __ldg(input2TmpPtr + outIdx);
int col_start_tmp = col_start;
half2 biasTmp = __ldg(bias + (col_start_tmp >> 1));
local_out[0] = static_cast<float>(input2Tmp.x)*input2_deQFactor + static_cast<float>(__ldg(input1 + input1Idx))*__ldg(weight_amax + col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(biasTmp.x);
col_start_tmp = col_start_tmp + 1;
local_out[1] = static_cast<float>(input2Tmp.y)*input2_deQFactor + static_cast<float>(__ldg(input1 + input1Idx + 1))*__ldg(weight_amax + col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(biasTmp.y);
col_start_tmp = col_start_tmp + 1;
biasTmp = __ldg(bias + (col_start_tmp >> 1));
local_out[2] = static_cast<float>(input2Tmp.z)*input2_deQFactor + static_cast<float>(__ldg(input1 + input1Idx + 2))*__ldg(weight_amax + col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(biasTmp.x);
col_start_tmp = col_start_tmp + 1;
local_out[3] = static_cast<float>(input2Tmp.w)*input2_deQFactor + static_cast<float>(__ldg(input1 + (input1Idx+3)))*__ldg(weight_amax + col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(biasTmp.y);
mean = blockReduceSum<float>(local_out[0] + local_out[1] + local_out[2] + local_out[3]);
if(threadIdx.x == 0)
s_mean = mean * __fdividef(1.0f, n);
__syncthreads();
local_out[0] = local_out[0] - s_mean;
local_out[1] = local_out[1] - s_mean;
local_out[2] = local_out[2] - s_mean;
local_out[3] = local_out[3] - s_mean;
variance = blockReduceSum<float>(local_out[0] * local_out[0] +
local_out[1] * local_out[1] +
local_out[2] * local_out[2] +
local_out[3] * local_out[3]
);
if(threadIdx.x == 0){
s_variance = variance * __fdividef(1.0f, n) + 1e-6f;
s_variance = rsqrtf(s_variance);
}
__syncthreads();
col_start_tmp = col_start >> 1;
biasTmp = __ldg(gamma+col_start_tmp);
half2 betaTmp = __ldg(beta+col_start_tmp);
local_out[0] = (local_out[0] * s_variance) * static_cast<float>(biasTmp.x) + static_cast<float>(betaTmp.x);
input2Tmp.x = float_to_int8_rn(local_out[0] * output_scale);
col_start = col_start+1;
local_out[1] = (local_out[1] * s_variance) * static_cast<float>(biasTmp.y) + static_cast<float>(betaTmp.y);
input2Tmp.y = float_to_int8_rn(local_out[1] * output_scale);
col_start = col_start+1;
col_start_tmp = col_start >> 1;
biasTmp = __ldg(gamma+col_start_tmp);
betaTmp = __ldg(beta+col_start_tmp);
local_out[2] = (local_out[2] * s_variance) * static_cast<float>(biasTmp.x) + static_cast<float>(betaTmp.x);
input2Tmp.z = float_to_int8_rn(local_out[2] * output_scale);
col_start = col_start+1;
local_out[3] = (local_out[3] * s_variance) * static_cast<float>(biasTmp.y) + static_cast<float>(betaTmp.y);
input2Tmp.w = float_to_int8_rn(local_out[3] * output_scale);
outTmpPtr[outIdx] = input2Tmp;
}
template<typename T>
void add_bias_input_layernorm_COL32_mixIntI_int8O_kernelLauncher(int8_t* output, const int32_t* input1, const int8_t* input2, const T* bias, const T* gamma,
const T* beta, int m, int n, hipStream_t stream, const float* weight_amax,
const float *input1_deQFactor_div127_ptr, const float *input2_deQFactor_ptr, const float *output_scale_ptr)
{
dim3 grid(m);
dim3 block(n/4);
assert(n <= 1024);
if (sizeof(T) == sizeof(half)){
hipLaunchKernelGGL(( add_bias_input_layernorm_COL32_mixIntI_int8O), dim3(grid), dim3(block), 0, stream, output, input1, input2, (const half2*)bias, (const half2*)gamma,
(const half2*)beta, m, n, weight_amax, input1_deQFactor_div127_ptr,
input2_deQFactor_ptr, output_scale_ptr);
}
else{
hipLaunchKernelGGL(( add_bias_input_layernorm_COL32_mixIntI_int8O<T>), dim3(grid), dim3(block), 0, stream, output, input1, input2, bias, gamma, beta,
m, n, weight_amax, input1_deQFactor_div127_ptr,
input2_deQFactor_ptr, output_scale_ptr);
}
}
template void add_bias_input_layernorm_COL32_mixIntI_int8O_kernelLauncher<float>(int8_t* output, const int32_t* input1, const int8_t* input2, const float* bias, const float* gamma, const float* beta, int m, int n, hipStream_t stream, const float* weight_amax, const float *input1_deQFactor_div127_ptr, const float *input2_deQFactor_ptr, const float *output_scale_ptr);
template void add_bias_input_layernorm_COL32_mixIntI_int8O_kernelLauncher<half>(int8_t* output, const int32_t* input1, const int8_t* input2, const half* bias, const half* gamma, const half* beta, int m, int n, hipStream_t stream, const float* weight_amax, const float *input1_deQFactor_div127_ptr, const float *input2_deQFactor_ptr, const float *output_scale_ptr);
//input1/input2/output matrix with layout of cublasLt CUBLASLT_ORDER_COL32 (m*n)
//(grid, block) must be (m, n)
//for per_axis_quantization for weight
template <typename T>
__global__
void add_bias_input_layernorm_COL32_int32I_DataTypeO(T* output, const int32_t* input1, const T* input2, const T* bias, const T* gamma,
const T* beta, int m, int n, const float* weight_amax, const float *input1_amax_ptr)
{
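// Same fusion as above, but the residual and output use the plain data type; the
// 0.000062f factor below folds in the 1/(127*127) dequantization of input and weight scales.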
const float input1_amax = __ldg(input1_amax_ptr);
int col_start = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out;
int outIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
float tmp = static_cast<float>(__ldg(input1 + outIdx)) * static_cast<float>(__ldg(weight_amax + col_start)) * input1_amax * 0.000062f; //(1/127/127);
float inputTmp = static_cast<float>(__ldg(input2 + outIdx));
local_out = tmp + inputTmp + static_cast<float>(__ldg(bias + col_start));
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean * __fdividef(1.0f, n);
__syncthreads();
local_out = local_out - s_mean;
variance = blockReduceSum<float>(local_out * local_out);
if(threadIdx.x == 0){
s_variance = variance * __fdividef(1.0f, n) + 1e-6f;
s_variance = rsqrtf(s_variance);
}
__syncthreads();
local_out = (local_out * s_variance) * static_cast<float>(__ldg(gamma + col_start)) + static_cast<float>(__ldg(beta + col_start));
output[outIdx] = local_out;
}
template <>
__global__
void add_bias_input_layernorm_COL32_int32I_DataTypeO(half2* output, const int32_t* input1, const half2* input2, const half2* bias, const half2* gamma,
const half2* beta, int m, int n, const float* weight_amax, const float *input1_amax_ptr)
{
int col_start = threadIdx.x << 1;
const float input1_amax = __ldg(input1_amax_ptr);
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out, local_out2;
int outIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
float tmp = static_cast<float>(__ldg(input1 + outIdx)) * __ldg(weight_amax + col_start) * input1_amax * 0.000062f; //(1/127/127);
float tmp2 = static_cast<float>(__ldg(input1 + outIdx + 1)) * __ldg(weight_amax + col_start + 1) * input1_amax * 0.000062f; //(1/127/127);
outIdx = outIdx >> 1;
half2 inputTmp = __ldg(input2 + outIdx);
half2 biasTmp = __ldg(bias + threadIdx.x);
local_out = tmp + static_cast<float>(inputTmp.x) + static_cast<float>(biasTmp.x);
local_out2 = tmp2 + static_cast<float>(inputTmp.y) + static_cast<float>(biasTmp.y);
mean = blockReduceSum<float>(local_out + local_out2);
if(threadIdx.x == 0)
s_mean = mean * __fdividef(1.0f, n);
__syncthreads();
local_out = local_out - s_mean;
local_out2 = local_out2 - s_mean;
variance = blockReduceSum<float>(local_out*local_out + local_out2*local_out2);
if(threadIdx.x == 0){
s_variance = variance * __fdividef(1.0f, n) + 1e-6f;
s_variance = rsqrtf(s_variance);
}
__syncthreads();
float2 outputTmp;
inputTmp = __ldg(gamma + threadIdx.x);
biasTmp = __ldg(beta + threadIdx.x);
outputTmp.x = (local_out * s_variance) * static_cast<float>(inputTmp.x) + static_cast<float>(biasTmp.x);
outputTmp.y = (local_out2 * s_variance) * static_cast<float>(inputTmp.y) + static_cast<float>(biasTmp.y);
inputTmp = __float22half2_rn(outputTmp);
output[outIdx] = inputTmp;
}
template <typename T>
void add_bias_input_layernorm_COL32_int32I_DataTypeO_kernelLauncher(T* output, const int32_t* input1, const T* input2, const T* bias, const T* gamma,
const T* beta, int m, int n, hipStream_t stream, const float* weight_amax,
const float* input1_amax_ptr){
dim3 grid(m);
dim3 block(n);
if (sizeof(T) == sizeof(half)){
block.x /= 2;
assert(block.x <= 1024);
hipLaunchKernelGGL(( add_bias_input_layernorm_COL32_int32I_DataTypeO), dim3(grid), dim3(block), 0, stream, (half2 *)output, input1, (const half2 *)input2, (const half2 *)bias, (const half2 *)gamma,
(const half2 *)beta, m, n, weight_amax, input1_amax_ptr);
}
else{
assert(block.x <= 1024);
hipLaunchKernelGGL(( add_bias_input_layernorm_COL32_int32I_DataTypeO<T>), dim3(grid), dim3(block), 0, stream, output, input1, input2, bias, gamma,
beta, m, n, weight_amax, input1_amax_ptr);
}
}
template void add_bias_input_layernorm_COL32_int32I_DataTypeO_kernelLauncher<float>(float* output, const int32_t* input1, const float* input2, const float* bias, const float* gamma, const float* beta, int m, int n, hipStream_t stream, const float* weight_amax, const float *input1_amax_ptr);
template void add_bias_input_layernorm_COL32_int32I_DataTypeO_kernelLauncher<half>(half* output, const int32_t* input1, const half* input2, const half* bias, const half* gamma, const half* beta, int m, int n, hipStream_t stream, const float* weight_amax, const float *input1_amax_ptr);
//input matrix is m*n column-major
//output matrix is m*n CUBLASLT_ORDER_COL32
//(grid, block) must be (m, n)
template <typename T>
__global__
void FT_transformA(T* dst, const T* src, int m, int n)
{
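// One block per row; each thread moves one element from its column-major index to its COL32 offset.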
int inIdx = threadIdx.x * m + blockIdx.x;
int col_start = threadIdx.x;
int outIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
dst[outIdx] = __ldg(&src[inIdx]);
}
template <typename T>
void FT_transformA_kernelLauncher(T* dst, const T* src, int m, int n, hipStream_t stream){
dim3 grid(m);
dim3 block(n);
assert(block.x <= 1024);
hipLaunchKernelGGL(( FT_transformA), dim3(grid), dim3(block), 0, stream, dst, src, m, n);
}
template void FT_transformA_kernelLauncher(float* dst, const float* src, int m, int n, hipStream_t stream);
template void FT_transformA_kernelLauncher(half* dst, const half* src, int m, int n, hipStream_t stream);
//input matrix is m*n CUBLASLT_ORDER_COL32
//output matrix is m*n column-major
//(grid, block) must be (m, n)
template <typename T>
__global__
void FT_transformC(T* dst, const T* src, int m, int n)
{
int outIdx = threadIdx.x * m + blockIdx.x;
int col_start = threadIdx.x;
int inIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
dst[outIdx] = __ldg(&src[inIdx]);
}
template <typename T>
void FT_transformC_kernelLauncher(T* dst, const T* src, int m, int n, hipStream_t stream){
dim3 grid(m);
dim3 block(n);
assert(block.x <= 1024);
hipLaunchKernelGGL(( FT_transformC), dim3(grid), dim3(block), 0, stream, dst, src, m, n);
}
template void FT_transformC_kernelLauncher(float* dst, const float* src, int m, int n, hipStream_t stream);
template void FT_transformC_kernelLauncher(half* dst, const half* src, int m, int n, hipStream_t stream);
template void FT_transformC_kernelLauncher(int8_t* dst, const int8_t* src, int m, int n, hipStream_t stream);
//src is the result of batch MM, whose size is batch_size*head_num*(seq_len, size_per_head), CUBLASLT_ORDER_COL32
//dst is of m = batch_size*seq_len, k(n) = head_num*size_per_head, CUBLASLT_ORDER_COL32
//grid(seq_len, batch_size)
//block(size_per_head/4, head_num)
//assume size_per_head is a multiple of 32
__global__
void transpose_COL32_kernel(int8_t* dst, const int32_t* src, const int batch_size, const int seq_len, const int head_num,
const int size_per_head, const float *v_buf_addBias_deQFactor, const float* qk_afterSM_deQFactor, const float* out_scale_ptr,
const int batch_size_x_seq_len, const int seq_len_x_size_per_head)
{
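// The V dequantization, softmax dequantization, and output quantization factors
// collapse into one multiplier applied before rounding to INT8.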
const float scale = __ldg(v_buf_addBias_deQFactor) * __ldg(qk_afterSM_deQFactor) * __ldg(out_scale_ptr);
int threadIdx4 = threadIdx.x << 2;
int batch_id = blockIdx.y;
int seq_id = blockIdx.x;
int head_id = threadIdx.y;
//get the (row, col) output layout of m*k
//m = batch_size*seq_len
//k = head_num*size_per_head
int mk_row = batch_id*seq_len + seq_id;
int mk_col = head_id*size_per_head + threadIdx4;
//get the (row, col) layout of COL32; leading dimension = 32*m = 32*batch_size*seq_len
int COL32_row = (mk_row << 5) + (mk_col&31);
int COL32_col = mk_col >> 5;
int outIdx = ((COL32_col << 5)*batch_size_x_seq_len + COL32_row) >> 2;
//get the (row, col) input layout of m'*k'
//m' = seq_len
//k' = size_per_head
mk_row = seq_id;
mk_col = threadIdx4;
//get the (row, col) layout of COL32; leading dimension = 32*m' = 32*seq_len
COL32_row = (mk_row << 5) + (mk_col&31);
COL32_col = mk_col >> 5;
int inIdx = (batch_id*head_num + head_id)*seq_len_x_size_per_head + (COL32_col << 5 )*seq_len + COL32_row;
char4 tmp;
tmp.x = float_to_int8_rn(__ldg(src+inIdx)*scale);
tmp.y = float_to_int8_rn(__ldg(src+inIdx+1)*scale);
tmp.z = float_to_int8_rn(__ldg(src+inIdx+2)*scale);
tmp.w = float_to_int8_rn(__ldg(src+inIdx+3)*scale);
char4 *dst_ptr4 = (char4 *)dst;
dst_ptr4[outIdx] = tmp;
}
void transpose_COL32_kernelLauncher(int8_t* dst, const int* src, const int batch_size, const int seq_len, const int head_num,
const int size_per_head, const float *v_buf_addBias_deQFactor, const float* qk_afterSM_deQFactor,
const float* out_scale_ptr, hipStream_t stream){
assert(size_per_head%32==0);
hipLaunchKernelGGL(( transpose_COL32_kernel), dim3(dim3(seq_len, batch_size)), dim3(dim3(size_per_head/4, head_num)), 0, stream, dst, src, batch_size, seq_len, head_num, size_per_head, v_buf_addBias_deQFactor, qk_afterSM_deQFactor, out_scale_ptr, batch_size*seq_len, seq_len*size_per_head);
}
//src is the result of batch MM, whose size is batch_size*head_num*(seq_len, size_per_head), CUBLASLT_ORDER_COL32
//dst is of m = valid_word_num, k(n) = head_num*size_per_head, CUBLASLT_ORDER_COL32
//grid(seq_len, batch_size)
//block(size_per_head/4, head_num)
//assume size_per_head is a multiple of 32
__global__
void transpose_COL32_rebuild_padding_kernel(int8_t* dst, const int32_t* src, const int* sequence_id_map, const int valid_word_num, const int batch_size,
const int seq_len, const int head_num, const int size_per_head, const float *v_buf_addBias_deQFactor,
const float* qk_afterSM_deQFactor, const float* out_scale_ptr, const int seq_len_x_size_per_head)
{
const float scale = __ldg(v_buf_addBias_deQFactor) * __ldg(qk_afterSM_deQFactor) * __ldg(out_scale_ptr);
int threadIdx4 = threadIdx.x << 2;
int batch_id = blockIdx.y;
int seq_id = blockIdx.x;
int head_id = threadIdx.y;
//get the (row, col) output layout of m*k
//m = valid_word_num
//k = head_num*size_per_head
int mk_row = __ldg(sequence_id_map + batch_id*seq_len + seq_id);
if (mk_row >= 0){
int mk_col = head_id*size_per_head + threadIdx4;
//get the (row, col) layout of COL32; leading dimension = 32*m = 32*valid_word_num
int COL32_row = (mk_row << 5) + (mk_col&31);
int COL32_col = mk_col >> 5;
int outIdx = ((COL32_col << 5)*valid_word_num + COL32_row) >> 2;
//get the (row, col) input layout of m'*k'
//m' = seq_len
//k' = size_per_head
mk_row = seq_id;
mk_col = threadIdx4;
//get the (row, col) layout of COL32; leading dimension = 32*m' = 32*seq_len
COL32_row = (mk_row << 5) + (mk_col&31);
COL32_col = mk_col >> 5;
int inIdx = (batch_id*head_num + head_id)*seq_len_x_size_per_head + (COL32_col << 5 )*seq_len + COL32_row;
char4 tmp;
tmp.x = float_to_int8_rn(__ldg(src+inIdx)*scale);
tmp.y = float_to_int8_rn(__ldg(src+inIdx+1)*scale);
tmp.z = float_to_int8_rn(__ldg(src+inIdx+2)*scale);
tmp.w = float_to_int8_rn(__ldg(src+inIdx+3)*scale);
char4 *dst_ptr4 = (char4 *)dst;
dst_ptr4[outIdx] = tmp;
}
}
void transpose_COL32_rebuild_padding_kernelLauncher(int8_t* dst, const int* src, const int* sequence_id_map, const int valid_word_num, const int batch_size,
const int seq_len, const int head_num, const int size_per_head, const float *v_buf_addBias_deQFactor,
const float* qk_afterSM_deQFactor, const float* out_scale_ptr, hipStream_t stream){
assert(size_per_head%32==0);
hipLaunchKernelGGL(( transpose_COL32_rebuild_padding_kernel), dim3(dim3(seq_len, batch_size)), dim3(dim3(size_per_head/4, head_num)), 0, stream, dst, src, sequence_id_map, valid_word_num, batch_size,
seq_len, head_num, size_per_head, v_buf_addBias_deQFactor,
qk_afterSM_deQFactor, out_scale_ptr, seq_len*size_per_head);
}
template <typename T>
__global__
void quantized_kernel(int8_t *dst, const T* src, const int size, const float* scale_ptr)
{
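// Vectorized quantization: each thread rounds four consecutive values and writes one char4.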
int tid = (blockIdx.x*blockDim.x + threadIdx.x) << 2;
if (tid < size){
const float scale = __ldg(scale_ptr);
char4 tmp;
tmp.x = float_to_int8_rn(static_cast<float>(__ldg(&src[tid]))*scale);
tmp.y = float_to_int8_rn(static_cast<float>(__ldg(&src[tid+1]))*scale);
tmp.z = float_to_int8_rn(static_cast<float>(__ldg(&src[tid+2]))*scale);
tmp.w = float_to_int8_rn(static_cast<float>(__ldg(&src[tid+3]))*scale);
char4 *dst_ptr4 = (char4 *)dst;
dst_ptr4[tid >> 2] = tmp;
}
}
template <typename T>
void quantized_kernelLauncher(int8_t* dst, const T * src, const int size, const float* scale_ptr, hipStream_t stream)
{
assert(size % (4 * 64) == 0);
dim3 grid((size+255)/256);
dim3 block(64);
hipLaunchKernelGGL(( quantized_kernel<T>), dim3(grid), dim3(block), 0, stream, dst, src, size, scale_ptr);
}
template void quantized_kernelLauncher<float>(int8_t* dst, const float * src, const int size, const float* scale_ptr, hipStream_t stream);
template void quantized_kernelLauncher<half>(int8_t* dst, const half * src, const int size, const float* scale_ptr, hipStream_t stream);
template void quantized_kernelLauncher<int32_t>(int8_t* dst, const int32_t * src, const int size, const float* scale_ptr, hipStream_t stream);
template <typename T>
__global__
void dequantized_kernel(T *dst, const int8_t* src, const int size, const float *scale_ptr)
{
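// Scalar dequantization: one thread per INT8 element, scaled back to the target type.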
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid < size){
float tmp = float(src[tid]);
dst[tid] = T(float(tmp) * __ldg(scale_ptr));
}
}
template <typename T>
void dequantized_kernelLauncher(T* dst, const int8_t * src, const int size, const float *scale_ptr, hipStream_t stream)
{
dim3 grid((size+255)/256);
dim3 block(256);
hipLaunchKernelGGL(( dequantized_kernel<T>), dim3(grid), dim3(block), 0, stream, dst, src, size, scale_ptr);
}
template void dequantized_kernelLauncher<float>(float* dst, const int8_t * src, const int size, const float *scale_ptr, hipStream_t stream);
template void dequantized_kernelLauncher<half>(half* dst, const int8_t * src, const int size, const float *scale_ptr, hipStream_t stream);
template void dequantized_kernelLauncher<int32_t>(int32_t* dst, const int8_t * src, const int size, const float *scale_ptr, hipStream_t stream);
}//namespace
|
451fcda45dc74844b1c2c472b119e22c836bc94c.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fastertransformer/common.h"
#include "cuda_kernels.h"
#include "cuda_int8_kernels.h"
#include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <climits>
#include <cfloat>
namespace fastertransformer{
template <typename T>
__inline__ __device__
T gelu(T x)
{
float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x))));
return x * cdf;
}
template <>
__inline__ __device__
half2 gelu(half2 val)
{
half2 val_pow3 = __hmul2(val, __hmul2(val, val));
float2 tmp_pow = __half22float2(val_pow3);
float2 tmp = __half22float2(val);
tmp.x = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x))));
tmp.y = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y))));
return __hmul2(val, __float22half2_rn(tmp));
}
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
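// Two-stage reduction: each warp reduces its 32 lanes, lane 0 stores the partial
// sum in shared memory, then the first warp reduces the per-warp partials.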
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)0.0f;
val = warpReduceSum(val);
return val;
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get max in each warp
if(lane == 0) // record in-warp max by warp idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f;
val = warpReduceMax<T>(val);
return val;
}
//transpose matrix
//for (m n) col-major
//grid((m+31)/32, (n+31)/32)
//block(32, 32)
template<typename T>
__global__
void transposeMatrix_kernel(T*dst, const T* src, const int m, const int n)
{
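// The +1 column of padding keeps the transposed reads free of shared-memory bank conflicts.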
__shared__ T tile[COL32_][COL32_+1];
int blockx32 = blockIdx.x * COL32_;
int blocky32 = blockIdx.y * COL32_;
int x = blockx32 + threadIdx.x;
int y = blocky32 + threadIdx.y;
bool check = ((x < m) && (y < n));
tile[threadIdx.y][threadIdx.x] = check ? __ldg(src+y*m+x) : T(0);
__syncthreads();
y = blockx32 + threadIdx.y;
x = blocky32 + threadIdx.x;
check = ((x < n) && (y < m));
if (check)
dst[y*n+x] = tile[threadIdx.x][threadIdx.y];
}
//for (m, n) col-major matrix
template <typename T>
void transposeMatrix_kernelLauncher(T* dst, const T* src, const int m, const int n, cudaStream_t stream)
{
transposeMatrix_kernel<T><<<dim3((m+31)/32, (n+31)/32), dim3(32, 32), 0, stream>>>(dst, src, m, n);
}
template void transposeMatrix_kernelLauncher<float>(float* dst, const float* src, const int m, const int n, cudaStream_t stream);
template void transposeMatrix_kernelLauncher<half>(half *dst, const half* src, const int m, const int n, cudaStream_t stream);
template void transposeMatrix_kernelLauncher<int8_t>(int8_t* dst, const int8_t* src, const int m, const int n, cudaStream_t stream);
template void transposeMatrix_kernelLauncher<int>(int* dst, const int* src, const int m, const int n, cudaStream_t stream);
//add bias to matrix of m * n, CUBLASLT_ORDER_COL32
//grid, thread = (m), (n/4)
//using char4
//for per-axis-quantization weight
template <typename T>
__global__
void add_bias_act_COL32_int32I_int8O(int8_t *out, const int32_t* input, const T* bias, const int m, const int n,
const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
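// Per element: dequantize the INT32 GEMM result with the per-channel weight amax,
// add the bias, apply GELU, then requantize with out_scale; four adjacent columns
// are packed into a single char4 store.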
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
int col_start = threadIdx.x << 2;
char4 *outTmpPtr = (char4 *)out;
char4 tmp;
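// COL32 offset of element (row = blockIdx.x, col = col_start): (col / 32) * 32 * m + row * 32 + (col % 32).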
int inIdx = (col_start & 0xffffffe0) * m + (blockIdx.x << 5) + (col_start&31);
int outIdx = inIdx >> 2;
float val;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start));
val = gelu(val);
tmp.x = float_to_int8_rn(val*out_scale);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start));
val = gelu(val);
tmp.y = float_to_int8_rn(val*out_scale);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start));
val = gelu(val);
tmp.z = float_to_int8_rn(val*out_scale);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start));
val = gelu(val);
tmp.w = float_to_int8_rn(val*out_scale);
outTmpPtr[outIdx] = tmp;
}
template <>
__global__
void add_bias_act_COL32_int32I_int8O(int8_t *out, const int32_t* input, const half2* bias, const int m, const int n,
const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
int col_start = threadIdx.x << 2;
int threadIdx2 = threadIdx.x << 1;
char4 *outTmpPtr = (char4 *)out;
char4 tmp;
int inIdx = (col_start & 0xffffffe0) * m + (blockIdx.x << 5) + (col_start&31);
int outIdx = inIdx >> 2;
float val;
half2 biasTmp = __ldg(bias+threadIdx2);
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(biasTmp.x);
val = gelu(val);
tmp.x = float_to_int8_rn(out_scale * val);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(biasTmp.y);
val = gelu(val);
tmp.y = float_to_int8_rn(out_scale * val);
biasTmp = __ldg(bias+threadIdx2+1);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(biasTmp.x);
val = gelu(val);
tmp.z = float_to_int8_rn(out_scale * val);
col_start = col_start + 1;
inIdx = inIdx + 1;
val = static_cast<float>(__ldg(input+inIdx))*__ldg(weight_amax+col_start)*input_deQFactor_div127 + static_cast<float>(biasTmp.y);
val = gelu(val);
tmp.w = float_to_int8_rn(out_scale * val);
outTmpPtr[outIdx] = tmp;
}
template <typename T>
void add_bias_act_COL32_int32I_int8O_kernelLauncher(int8_t *out, const int32_t* input, const T* bias, const int m, const int n,
cudaStream_t stream, const float* weight_amax, const float* input_deQFactor_div127_ptr, const float* out_scale_ptr){
dim3 grid(m);
dim3 block(n/4);
assert(block.x <= 1024);
if (sizeof(T) == sizeof(half))
add_bias_act_COL32_int32I_int8O<<<grid, block, 0, stream>>>(out, input, (const half2*)bias, m, n, weight_amax, input_deQFactor_div127_ptr, out_scale_ptr);
else
add_bias_act_COL32_int32I_int8O<T><<<grid, block, 0, stream>>>(out, input, bias, m, n, weight_amax, input_deQFactor_div127_ptr, out_scale_ptr);
}
template void add_bias_act_COL32_int32I_int8O_kernelLauncher<float>(int8_t *out, const int32_t* input, const float* bias, const int m, const int n, cudaStream_t stream, const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr);
template void add_bias_act_COL32_int32I_int8O_kernelLauncher<half>(int8_t *out, const int32_t* input, const half* bias, const int m, const int n, cudaStream_t stream, const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr);
//input1/input2/out matrix with layout of cublasLt CUBLASLT_ORDER_COL32 (m*n)
//(grid, block) must be (m, n/4)
//using char4
template <typename T>
__global__
void add_bias_input_layernorm_COL32_mixIntI_int8O(int8_t* output, const int32_t* input1, const int8_t* input2, const T* bias, const T* gamma,
const T* beta, int m, int n, const float* weight_amax, const float *input1_deQFactor_div127_ptr,
const float *input2_deQFactor_ptr, const float *output_scale_ptr)
{
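// Fused residual add + LayerNorm: dequantize the INT32 GEMM output and the INT8 residual,
// add the bias, normalize over the n columns of this block's row, then requantize to INT8.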
const float input1_deQFactor_div127 = __ldg(input1_deQFactor_div127_ptr);
const float input2_deQFactor = __ldg(input2_deQFactor_ptr);
const float output_scale = __ldg(output_scale_ptr);
int col_start = threadIdx.x << 2;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out[4];
int input1Idx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
int outIdx = input1Idx >> 2;
char4 *outTmpPtr = (char4*)output;
char4 *input2TmpPtr = (char4*)input2;
char4 input2Tmp = __ldg(input2TmpPtr+outIdx);
int col_start_tmp = col_start;
local_out[0] = static_cast<float>(input2Tmp.x)*input2_deQFactor + static_cast<float>(__ldg(input1+input1Idx))*__ldg(weight_amax+col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start_tmp));
col_start_tmp = col_start_tmp + 1;
local_out[1] = static_cast<float>(input2Tmp.y)*input2_deQFactor + static_cast<float>(__ldg(input1+input1Idx+1))*__ldg(weight_amax+col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start_tmp));
col_start_tmp = col_start_tmp + 1;
local_out[2] = static_cast<float>(input2Tmp.z)*input2_deQFactor + static_cast<float>(__ldg(input1+input1Idx+2))*__ldg(weight_amax+col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start_tmp));
col_start_tmp = col_start_tmp + 1;
local_out[3] = static_cast<float>(input2Tmp.w)*input2_deQFactor + static_cast<float>(__ldg(input1+input1Idx+3))*__ldg(weight_amax+col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(__ldg(bias+col_start_tmp));
mean = blockReduceSum<float>(local_out[0] + local_out[1] + local_out[2] + local_out[3]);
if(threadIdx.x == 0)
s_mean = mean * __fdividef(1.0f, n);
__syncthreads();
local_out[0] = local_out[0] - s_mean;
local_out[1] = local_out[1] - s_mean;
local_out[2] = local_out[2] - s_mean;
local_out[3] = local_out[3] - s_mean;
variance = blockReduceSum<float>(local_out[0] * local_out[0] +
local_out[1] * local_out[1] +
local_out[2] * local_out[2] +
local_out[3] * local_out[3]
);
if(threadIdx.x == 0){
s_variance = variance * __fdividef(1.0f, n) + 1e-6f;
s_variance = rsqrtf(s_variance);
}
__syncthreads();
local_out[0] = (local_out[0] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start));
input2Tmp.x = float_to_int8_rn(local_out[0] * output_scale);
col_start = col_start+1;
local_out[1] = (local_out[1] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start));
input2Tmp.y = float_to_int8_rn(local_out[1] * output_scale);
col_start = col_start+1;
local_out[2] = (local_out[2] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start));
input2Tmp.z = float_to_int8_rn(local_out[2] * output_scale);
col_start = col_start+1;
local_out[3] = (local_out[3] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start));
input2Tmp.w = float_to_int8_rn(local_out[3] * output_scale);
outTmpPtr[outIdx] = input2Tmp;
}
template <>
__global__
void add_bias_input_layernorm_COL32_mixIntI_int8O(int8_t* output, const int32_t* input1, const int8_t* input2, const half2* bias, const half2* gamma,
const half2* beta, int m, int n, const float* weight_amax, const float *input1_deQFactor_div127_ptr,
const float *input2_deQFactor_ptr, const float *output_scale_ptr)
{
const float input1_deQFactor_div127 = __ldg(input1_deQFactor_div127_ptr);
const float input2_deQFactor = __ldg(input2_deQFactor_ptr);
const float output_scale = __ldg(output_scale_ptr);
int col_start = threadIdx.x << 2;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out[4];
int input1Idx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
int outIdx = input1Idx >> 2;
char4 *outTmpPtr = (char4*)output;
char4 *input2TmpPtr = (char4*)input2;
char4 input2Tmp = __ldg(input2TmpPtr + outIdx);
int col_start_tmp = col_start;
half2 biasTmp = __ldg(bias + (col_start_tmp >> 1));
local_out[0] = static_cast<float>(input2Tmp.x)*input2_deQFactor + static_cast<float>(__ldg(input1 + input1Idx))*__ldg(weight_amax + col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(biasTmp.x);
col_start_tmp = col_start_tmp + 1;
local_out[1] = static_cast<float>(input2Tmp.y)*input2_deQFactor + static_cast<float>(__ldg(input1 + input1Idx + 1))*__ldg(weight_amax + col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(biasTmp.y);
col_start_tmp = col_start_tmp + 1;
biasTmp = __ldg(bias + (col_start_tmp >> 1));
local_out[2] = static_cast<float>(input2Tmp.z)*input2_deQFactor + static_cast<float>(__ldg(input1 + input1Idx + 2))*__ldg(weight_amax + col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(biasTmp.x);
col_start_tmp = col_start_tmp + 1;
local_out[3] = static_cast<float>(input2Tmp.w)*input2_deQFactor + static_cast<float>(__ldg(input1 + (input1Idx+3)))*__ldg(weight_amax + col_start_tmp)*input1_deQFactor_div127 + static_cast<float>(biasTmp.y);
mean = blockReduceSum<float>(local_out[0] + local_out[1] + local_out[2] + local_out[3]);
if(threadIdx.x == 0)
s_mean = mean * __fdividef(1.0f, n);
__syncthreads();
local_out[0] = local_out[0] - s_mean;
local_out[1] = local_out[1] - s_mean;
local_out[2] = local_out[2] - s_mean;
local_out[3] = local_out[3] - s_mean;
variance = blockReduceSum<float>(local_out[0] * local_out[0] +
local_out[1] * local_out[1] +
local_out[2] * local_out[2] +
local_out[3] * local_out[3]
);
if(threadIdx.x == 0){
s_variance = variance * __fdividef(1.0f, n) + 1e-6f;
s_variance = rsqrtf(s_variance);
}
__syncthreads();
col_start_tmp = col_start >> 1;
biasTmp = __ldg(gamma+col_start_tmp);
half2 betaTmp = __ldg(beta+col_start_tmp);
local_out[0] = (local_out[0] * s_variance) * static_cast<float>(biasTmp.x) + static_cast<float>(betaTmp.x);
input2Tmp.x = float_to_int8_rn(local_out[0] * output_scale);
col_start = col_start+1;
local_out[1] = (local_out[1] * s_variance) * static_cast<float>(biasTmp.y) + static_cast<float>(betaTmp.y);
input2Tmp.y = float_to_int8_rn(local_out[1] * output_scale);
col_start = col_start+1;
col_start_tmp = col_start >> 1;
biasTmp = __ldg(gamma+col_start_tmp);
betaTmp = __ldg(beta+col_start_tmp);
local_out[2] = (local_out[2] * s_variance) * static_cast<float>(biasTmp.x) + static_cast<float>(betaTmp.x);
input2Tmp.z = float_to_int8_rn(local_out[2] * output_scale);
col_start = col_start+1;
local_out[3] = (local_out[3] * s_variance) * static_cast<float>(biasTmp.y) + static_cast<float>(betaTmp.y);
input2Tmp.w = float_to_int8_rn(local_out[3] * output_scale);
outTmpPtr[outIdx] = input2Tmp;
}
template<typename T>
void add_bias_input_layernorm_COL32_mixIntI_int8O_kernelLauncher(int8_t* output, const int32_t* input1, const int8_t* input2, const T* bias, const T* gamma,
const T* beta, int m, int n, cudaStream_t stream, const float* weight_amax,
const float *input1_deQFactor_div127_ptr, const float *input2_deQFactor_ptr, const float *output_scale_ptr)
{
dim3 grid(m);
dim3 block(n/4);
assert(n <= 1024);
if (sizeof(T) == sizeof(half)){
add_bias_input_layernorm_COL32_mixIntI_int8O<<<grid, block, 0, stream>>>(output, input1, input2, (const half2*)bias, (const half2*)gamma,
(const half2*)beta, m, n, weight_amax, input1_deQFactor_div127_ptr,
input2_deQFactor_ptr, output_scale_ptr);
}
else{
add_bias_input_layernorm_COL32_mixIntI_int8O<T><<<grid, block, 0, stream>>>(output, input1, input2, bias, gamma, beta,
m, n, weight_amax, input1_deQFactor_div127_ptr,
input2_deQFactor_ptr, output_scale_ptr);
}
}
template void add_bias_input_layernorm_COL32_mixIntI_int8O_kernelLauncher<float>(int8_t* output, const int32_t* input1, const int8_t* input2, const float* bias, const float* gamma, const float* beta, int m, int n, cudaStream_t stream, const float* weight_amax, const float *input1_deQFactor_div127_ptr, const float *input2_deQFactor_ptr, const float *output_scale_ptr);
template void add_bias_input_layernorm_COL32_mixIntI_int8O_kernelLauncher<half>(int8_t* output, const int32_t* input1, const int8_t* input2, const half* bias, const half* gamma, const half* beta, int m, int n, cudaStream_t stream, const float* weight_amax, const float *input1_deQFactor_div127_ptr, const float *input2_deQFactor_ptr, const float *output_scale_ptr);
//input1/input2/output matrix with layout of cublasLt CUBLASLT_ORDER_COL32 (m*n)
//(grid, block) must be (m, n)
//for per_axis_quantization for weight
template <typename T>
__global__
void add_bias_input_layernorm_COL32_int32I_DataTypeO(T* output, const int32_t* input1, const T* input2, const T* bias, const T* gamma,
const T* beta, int m, int n, const float* weight_amax, const float *input1_amax_ptr)
{
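// Same fusion as above, but the residual and output use the plain data type; the
// 0.000062f factor below folds in the 1/(127*127) dequantization of input and weight scales.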
const float input1_amax = __ldg(input1_amax_ptr);
int col_start = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out;
int outIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
float tmp = static_cast<float>(__ldg(input1 + outIdx)) * static_cast<float>(__ldg(weight_amax + col_start)) * input1_amax * 0.000062f; //(1/127/127);
float inputTmp = static_cast<float>(__ldg(input2 + outIdx));
local_out = tmp + inputTmp + static_cast<float>(__ldg(bias + col_start));
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean * __fdividef(1.0f, n);
__syncthreads();
local_out = local_out - s_mean;
variance = blockReduceSum<float>(local_out * local_out);
if(threadIdx.x == 0){
s_variance = variance * __fdividef(1.0f, n) + 1e-6f;
s_variance = rsqrtf(s_variance);
}
__syncthreads();
local_out = (local_out * s_variance) * static_cast<float>(__ldg(gamma + col_start)) + static_cast<float>(__ldg(beta + col_start));
output[outIdx] = local_out;
}
template <>
__global__
void add_bias_input_layernorm_COL32_int32I_DataTypeO(half2* output, const int32_t* input1, const half2* input2, const half2* bias, const half2* gamma,
const half2* beta, int m, int n, const float* weight_amax, const float *input1_amax_ptr)
{
int col_start = threadIdx.x << 1;
const float input1_amax = __ldg(input1_amax_ptr);
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out, local_out2;
int outIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
float tmp = static_cast<float>(__ldg(input1 + outIdx)) * __ldg(weight_amax + col_start) * input1_amax * 0.000062f; //(1/127/127);
float tmp2 = static_cast<float>(__ldg(input1 + outIdx + 1)) * __ldg(weight_amax + col_start + 1) * input1_amax * 0.000062f; //(1/127/127);
outIdx = outIdx >> 1;
half2 inputTmp = __ldg(input2 + outIdx);
half2 biasTmp = __ldg(bias + threadIdx.x);
local_out = tmp + static_cast<float>(inputTmp.x) + static_cast<float>(biasTmp.x);
local_out2 = tmp2 + static_cast<float>(inputTmp.y) + static_cast<float>(biasTmp.y);
mean = blockReduceSum<float>(local_out + local_out2);
if(threadIdx.x == 0)
s_mean = mean * __fdividef(1.0f, n);
__syncthreads();
local_out = local_out - s_mean;
local_out2 = local_out2 - s_mean;
variance = blockReduceSum<float>(local_out*local_out + local_out2*local_out2);
if(threadIdx.x == 0){
s_variance = variance * __fdividef(1.0f, n) + 1e-6f;
s_variance = rsqrtf(s_variance);
}
__syncthreads();
float2 outputTmp;
inputTmp = __ldg(gamma + threadIdx.x);
biasTmp = __ldg(beta + threadIdx.x);
outputTmp.x = (local_out * s_variance) * static_cast<float>(inputTmp.x) + static_cast<float>(biasTmp.x);
outputTmp.y = (local_out2 * s_variance) * static_cast<float>(inputTmp.y) + static_cast<float>(biasTmp.y);
inputTmp = __float22half2_rn(outputTmp);
output[outIdx] = inputTmp;
}
template <typename T>
void add_bias_input_layernorm_COL32_int32I_DataTypeO_kernelLauncher(T* output, const int32_t* input1, const T* input2, const T* bias, const T* gamma,
const T* beta, int m, int n, cudaStream_t stream, const float* weight_amax,
const float* input1_amax_ptr){
dim3 grid(m);
dim3 block(n);
if (sizeof(T) == sizeof(half)){
block.x /= 2;
assert(block.x <= 1024);
add_bias_input_layernorm_COL32_int32I_DataTypeO<<<grid, block, 0, stream>>>((half2 *)output, input1, (const half2 *)input2, (const half2 *)bias, (const half2 *)gamma,
(const half2 *)beta, m, n, weight_amax, input1_amax_ptr);
}
else{
assert(block.x <= 1024);
add_bias_input_layernorm_COL32_int32I_DataTypeO<T><<<grid, block, 0, stream>>>(output, input1, input2, bias, gamma,
beta, m, n, weight_amax, input1_amax_ptr);
}
}
template void add_bias_input_layernorm_COL32_int32I_DataTypeO_kernelLauncher<float>(float* output, const int32_t* input1, const float* input2, const float* bias, const float* gamma, const float* beta, int m, int n, cudaStream_t stream, const float* weight_amax, const float *input1_amax_ptr);
template void add_bias_input_layernorm_COL32_int32I_DataTypeO_kernelLauncher<half>(half* output, const int32_t* input1, const half* input2, const half* bias, const half* gamma, const half* beta, int m, int n, cudaStream_t stream, const float* weight_amax, const float *input1_amax_ptr);
//input matrix is m*n column-major
//output matrix is m*n CUBLASLT_ORDER_COL32
//(grid, block) must be (m, n)
template <typename T>
__global__
void FT_transformA(T* dst, const T* src, int m, int n)
{
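// One block per row; each thread moves one element from its column-major index to its COL32 offset.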
int inIdx = threadIdx.x * m + blockIdx.x;
int col_start = threadIdx.x;
int outIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
dst[outIdx] = __ldg(&src[inIdx]);
}
template <typename T>
void FT_transformA_kernelLauncher(T* dst, const T* src, int m, int n, cudaStream_t stream){
dim3 grid(m);
dim3 block(n);
assert(block.x <= 1024);
FT_transformA<<<grid, block, 0, stream>>>(dst, src, m, n);
}
template void FT_transformA_kernelLauncher(float* dst, const float* src, int m, int n, cudaStream_t stream);
template void FT_transformA_kernelLauncher(half* dst, const half* src, int m, int n, cudaStream_t stream);
//input matrix is m*n CUBLASLT_ORDER_COL32
//output matrix is m*n column-major
//(grid, block) must be (m, n)
template <typename T>
__global__
void FT_transformC(T* dst, const T* src, int m, int n)
{
int outIdx = threadIdx.x * m + blockIdx.x;
int col_start = threadIdx.x;
int inIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31));
dst[outIdx] = __ldg(&src[inIdx]);
}
template <typename T>
void FT_transformC_kernelLauncher(T* dst, const T* src, int m, int n, cudaStream_t stream){
dim3 grid(m);
dim3 block(n);
assert(block.x <= 1024);
FT_transformC<<<grid, block, 0, stream>>>(dst, src, m, n);
}
template void FT_transformC_kernelLauncher(float* dst, const float* src, int m, int n, cudaStream_t stream);
template void FT_transformC_kernelLauncher(half* dst, const half* src, int m, int n, cudaStream_t stream);
template void FT_transformC_kernelLauncher(int8_t* dst, const int8_t* src, int m, int n, cudaStream_t stream);
//src is the result of batch MM, whose size is batch_size*head_num*(seq_len, size_per_head), CUBLASLT_ORDER_COL32
//dst is of m = batch_size*seq_len, k(n) = head_num*size_per_head, CUBLASLT_ORDER_COL32
//grid(seq_len, batch_size)
//block(size_per_head/4, head_num)
//assume size_per_head is a multiple of 32
__global__
void transpose_COL32_kernel(int8_t* dst, const int32_t* src, const int batch_size, const int seq_len, const int head_num,
const int size_per_head, const float *v_buf_addBias_deQFactor, const float* qk_afterSM_deQFactor, const float* out_scale_ptr,
const int batch_size_x_seq_len, const int seq_len_x_size_per_head)
{
const float scale = __ldg(v_buf_addBias_deQFactor) * __ldg(qk_afterSM_deQFactor) * __ldg(out_scale_ptr);
int threadIdx4 = threadIdx.x << 2;
int batch_id = blockIdx.y;
int seq_id = blockIdx.x;
int head_id = threadIdx.y;
//get the (row, col) output layout of m*k
//m = batch_size*seq_len
//k = head_num*size_per_head
int mk_row = batch_id*seq_len + seq_id;
int mk_col = head_id*size_per_head + threadIdx4;
//get the (row, col) layout of COL32; leading dimension = 32*m = 32*batch_size*seq_len
int COL32_row = (mk_row << 5) + (mk_col&31);
int COL32_col = mk_col >> 5;
int outIdx = ((COL32_col << 5)*batch_size_x_seq_len + COL32_row) >> 2;
//get the (row, col) input layout of m'*k'
//m' = seq_len
//k' = size_per_head
mk_row = seq_id;
mk_col = threadIdx4;
//get the (row, col) layout of COL32; leading dimension = 32*m' = 32*seq_len
COL32_row = (mk_row << 5) + (mk_col&31);
COL32_col = mk_col >> 5;
int inIdx = (batch_id*head_num + head_id)*seq_len_x_size_per_head + (COL32_col << 5 )*seq_len + COL32_row;
char4 tmp;
tmp.x = float_to_int8_rn(__ldg(src+inIdx)*scale);
tmp.y = float_to_int8_rn(__ldg(src+inIdx+1)*scale);
tmp.z = float_to_int8_rn(__ldg(src+inIdx+2)*scale);
tmp.w = float_to_int8_rn(__ldg(src+inIdx+3)*scale);
char4 *dst_ptr4 = (char4 *)dst;
dst_ptr4[outIdx] = tmp;
}
void transpose_COL32_kernelLauncher(int8_t* dst, const int* src, const int batch_size, const int seq_len, const int head_num,
const int size_per_head, const float *v_buf_addBias_deQFactor, const float* qk_afterSM_deQFactor,
const float* out_scale_ptr, cudaStream_t stream){
assert(size_per_head%32==0);
transpose_COL32_kernel<<<dim3(seq_len, batch_size), dim3(size_per_head/4, head_num), 0, stream>>>(dst, src, batch_size, seq_len, head_num, size_per_head, v_buf_addBias_deQFactor, qk_afterSM_deQFactor, out_scale_ptr, batch_size*seq_len, seq_len*size_per_head);
}
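//in transpose_COL32 above, each thread handles four consecutive elements along size_per_head (hence block.x = size_per_head/4 and the multiple-of-32 requirement); the int32 batch-GEMM result is scaled by the product of the three quantization factors and rounded to int8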
//src is the result of batch MM, whose size is batch_size*head_num*(seq_len, size_per_head), CUBLASLT_ORDER_COL32
//dst is of m = valid_word_num, k(n) = head_num*size_per_head, CUBLASLT_ORDER_COL32
//grid(seq_len, batch_size)
//block(size_per_head/4, head_num)
//assume size_per_head is a multiple of 32
__global__
void transpose_COL32_rebuild_padding_kernel(int8_t* dst, const int32_t* src, const int* sequence_id_map, const int valid_word_num, const int batch_size,
const int seq_len, const int head_num, const int size_per_head, const float *v_buf_addBias_deQFactor,
const float* qk_afterSM_deQFactor, const float* out_scale_ptr, const int seq_len_x_size_per_head)
{
const float scale = __ldg(v_buf_addBias_deQFactor) * __ldg(qk_afterSM_deQFactor) * __ldg(out_scale_ptr);
int threadIdx4 = threadIdx.x << 2;
int batch_id = blockIdx.y;
int seq_id = blockIdx.x;
int head_id = threadIdx.y;
//get the (row, col) output layout of m*k
//m = valid_word_num
//k = head_num*size_per_head
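//a negative entry in sequence_id_map marks a padding token; such rows are skipped below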
int mk_row = __ldg(sequence_id_map + batch_id*seq_len + seq_id);
if (mk_row >= 0){
int mk_col = head_id*size_per_head + threadIdx4;
//get the (row, col) layout of COL32; leading dimension = 32*m = 32*valid_word_num
int COL32_row = (mk_row << 5) + (mk_col&31);
int COL32_col = mk_col >> 5;
int outIdx = ((COL32_col << 5)*valid_word_num + COL32_row) >> 2;
//get the (row, col) input layout of m'*k'
//m' = seq_len
//k' = size_per_head
mk_row = seq_id;
mk_col = threadIdx4;
//get the (row, col) layout of COL32; leading dimension = 32*m' = 32*seq_len
COL32_row = (mk_row << 5) + (mk_col&31);
COL32_col = mk_col >> 5;
int inIdx = (batch_id*head_num + head_id)*seq_len_x_size_per_head + (COL32_col << 5 )*seq_len + COL32_row;
char4 tmp;
tmp.x = float_to_int8_rn(__ldg(src+inIdx)*scale);
tmp.y = float_to_int8_rn(__ldg(src+inIdx+1)*scale);
tmp.z = float_to_int8_rn(__ldg(src+inIdx+2)*scale);
tmp.w = float_to_int8_rn(__ldg(src+inIdx+3)*scale);
char4 *dst_ptr4 = (char4 *)dst;
dst_ptr4[outIdx] = tmp;
}
}
void transpose_COL32_rebuild_padding_kernelLauncher(int8_t* dst, const int* src, const int* sequence_id_map, const int valid_word_num, const int batch_size,
const int seq_len, const int head_num, const int size_per_head, const float *v_buf_addBias_deQFactor,
const float* qk_afterSM_deQFactor, const float* out_scale_ptr, cudaStream_t stream){
assert(size_per_head%32==0);
transpose_COL32_rebuild_padding_kernel<<<dim3(seq_len, batch_size), dim3(size_per_head/4, head_num), 0, stream>>>(dst, src, sequence_id_map, valid_word_num, batch_size,
seq_len, head_num, size_per_head, v_buf_addBias_deQFactor,
qk_afterSM_deQFactor, out_scale_ptr, seq_len*size_per_head);
}
template <typename T>
__global__
void quantized_kernel(int8_t *dst, const T* src, const int size, const float* scale_ptr)
{
int tid = (blockIdx.x*blockDim.x + threadIdx.x) << 2;
if (tid < size){
const float scale = __ldg(scale_ptr);
char4 tmp;
tmp.x = float_to_int8_rn(static_cast<float>(__ldg(&src[tid]))*scale);
tmp.y = float_to_int8_rn(static_cast<float>(__ldg(&src[tid+1]))*scale);
tmp.z = float_to_int8_rn(static_cast<float>(__ldg(&src[tid+2]))*scale);
tmp.w = float_to_int8_rn(static_cast<float>(__ldg(&src[tid+3]))*scale);
char4 *dst_ptr4 = (char4 *)dst;
dst_ptr4[tid >> 2] = tmp;
}
}
template <typename T>
void quantized_kernelLauncher(int8_t* dst, const T * src, const int size, const float* scale_ptr, cudaStream_t stream)
{
assert(size % (4 * 64) == 0);
dim3 grid((size+255)/256);
dim3 block(64);
quantized_kernel<T><<<grid, block, 0, stream>>>(dst, src, size, scale_ptr);
}
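//each thread of quantized_kernel packs four consecutive values into one char4, so a 64-thread block covers 256 elements; the assert above therefore requires size to be a multiple of 256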
template void quantized_kernelLauncher<float>(int8_t* dst, const float * src, const int size, const float* scale_ptr, cudaStream_t stream);
template void quantized_kernelLauncher<half>(int8_t* dst, const half * src, const int size, const float* scale_ptr, cudaStream_t stream);
template void quantized_kernelLauncher<int32_t>(int8_t* dst, const int32_t * src, const int size, const float* scale_ptr, cudaStream_t stream);
template <typename T>
__global__
void dequantized_kernel(T *dst, const int8_t* src, const int size, const float *scale_ptr)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid < size){
float tmp = float(src[tid]);
dst[tid] = T(float(tmp) * __ldg(scale_ptr));
}
}
template <typename T>
void dequantized_kernelLauncher(T* dst, const int8_t * src, const int size, const float *scale_ptr, cudaStream_t stream)
{
dim3 grid((size+255)/256);
dim3 block(256);
dequantized_kernel<T><<<grid, block, 0, stream>>>(dst, src, size, scale_ptr);
}
template void dequantized_kernelLauncher<float>(float* dst, const int8_t * src, const int size, const float *scale_ptr, cudaStream_t stream);
template void dequantized_kernelLauncher<half>(half* dst, const int8_t * src, const int size, const float *scale_ptr, cudaStream_t stream);
template void dequantized_kernelLauncher<int32_t>(int32_t* dst, const int8_t * src, const int size, const float *scale_ptr, cudaStream_t stream);
}//namespace
|
1304bb6fb43857b407c8c1be968240a41f0b4ab3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2019 Patrick Stotko
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <stdgpu/atomic.cuh> // stdgpu::atomic
#include <stdgpu/bitset.cuh> // stdgpu::bitset
#include <stdgpu/iterator.h> // device_begin, device_end
#include <stdgpu/memory.h> // createDeviceArray, destroyDeviceArray
#include <stdgpu/platform.h> // STDGPU_HOST_DEVICE
struct is_odd
{
STDGPU_HOST_DEVICE bool
operator()(const int x) const
{
return x % 2 == 1;
}
};
__global__ void
set_bits(const int* d_result, const stdgpu::index_t d_result_size, stdgpu::bitset<> bits, stdgpu::atomic<int> counter)
{
stdgpu::index_t i = static_cast<stdgpu::index_t>(blockIdx.x * blockDim.x + threadIdx.x);
if (i >= d_result_size)
return;
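// bits.set() returns the previous value of the bit, so counter only counts bits newly flipped from 0 to 1 by this call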
bool was_set = bits.set(d_result[i]);
if (!was_set)
{
++counter;
}
}
int
main()
{
//
// EXAMPLE DESCRIPTION
// -------------------
// This example shows how every second bit of stdgpu::bitset can be set concurrently in a GPU kernel.
//
const stdgpu::index_t n = 100;
int* d_input = createDeviceArray<int>(n);
int* d_result = createDeviceArray<int>(n / 2);
stdgpu::bitset<> bits = stdgpu::bitset<>::createDeviceObject(n);
stdgpu::atomic<int> counter = stdgpu::atomic<int>::createDeviceObject();
thrust::sequence(stdgpu::device_begin(d_input), stdgpu::device_end(d_input), 1);
// d_input : 1, 2, 3, ..., 100
thrust::copy_if(stdgpu::device_cbegin(d_input),
stdgpu::device_cend(d_input),
stdgpu::device_begin(d_result),
is_odd());
// d_result : 1, 3, 5, ..., 99
// bits : 000000..00
stdgpu::index_t threads = 32;
stdgpu::index_t blocks = ((n / 2) + threads - 1) / threads;
counter.store(0);
hipLaunchKernelGGL(( set_bits), dim3(static_cast<unsigned int>(blocks)), dim3(static_cast<unsigned int>(threads)), 0, 0, d_result, n / 2, bits, counter);
hipDeviceSynchronize();
// bits : 010101...01
std::cout << "First run: The number of set bits is " << bits.count() << " (" << n / 2 << " expected; "
<< counter.load() << " of those previously unset)" << std::endl;
counter.store(0);
hipLaunchKernelGGL(( set_bits), dim3(static_cast<unsigned int>(blocks)), dim3(static_cast<unsigned int>(threads)), 0, 0, d_result, n / 2, bits, counter);
hipDeviceSynchronize();
// bits : 010101...01
std::cout << "Second run: The number of set bits is " << bits.count() << " (" << n / 2 << " expected; "
<< counter.load() << " of those previously unset)" << std::endl;
destroyDeviceArray<int>(d_input);
destroyDeviceArray<int>(d_result);
stdgpu::bitset<>::destroyDeviceObject(bits);
stdgpu::atomic<int>::destroyDeviceObject(counter);
}
|
1304bb6fb43857b407c8c1be968240a41f0b4ab3.cu
|
/*
* Copyright 2019 Patrick Stotko
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <stdgpu/atomic.cuh> // stdgpu::atomic
#include <stdgpu/bitset.cuh> // stdgpu::bitset
#include <stdgpu/iterator.h> // device_begin, device_end
#include <stdgpu/memory.h> // createDeviceArray, destroyDeviceArray
#include <stdgpu/platform.h> // STDGPU_HOST_DEVICE
struct is_odd
{
STDGPU_HOST_DEVICE bool
operator()(const int x) const
{
return x % 2 == 1;
}
};
__global__ void
set_bits(const int* d_result, const stdgpu::index_t d_result_size, stdgpu::bitset<> bits, stdgpu::atomic<int> counter)
{
stdgpu::index_t i = static_cast<stdgpu::index_t>(blockIdx.x * blockDim.x + threadIdx.x);
if (i >= d_result_size)
return;
bool was_set = bits.set(d_result[i]);
if (!was_set)
{
++counter;
}
}
int
main()
{
//
// EXAMPLE DESCRIPTION
// -------------------
// This example shows how every second bit of stdgpu::bitset can be set concurrently in a GPU kernel.
//
const stdgpu::index_t n = 100;
int* d_input = createDeviceArray<int>(n);
int* d_result = createDeviceArray<int>(n / 2);
stdgpu::bitset<> bits = stdgpu::bitset<>::createDeviceObject(n);
stdgpu::atomic<int> counter = stdgpu::atomic<int>::createDeviceObject();
thrust::sequence(stdgpu::device_begin(d_input), stdgpu::device_end(d_input), 1);
// d_input : 1, 2, 3, ..., 100
thrust::copy_if(stdgpu::device_cbegin(d_input),
stdgpu::device_cend(d_input),
stdgpu::device_begin(d_result),
is_odd());
// d_result : 1, 3, 5, ..., 99
// bits : 000000..00
stdgpu::index_t threads = 32;
stdgpu::index_t blocks = ((n / 2) + threads - 1) / threads;
counter.store(0);
set_bits<<<static_cast<unsigned int>(blocks), static_cast<unsigned int>(threads)>>>(d_result, n / 2, bits, counter);
cudaDeviceSynchronize();
// bits : 010101...01
std::cout << "First run: The number of set bits is " << bits.count() << " (" << n / 2 << " expected; "
<< counter.load() << " of those previously unset)" << std::endl;
counter.store(0);
set_bits<<<static_cast<unsigned int>(blocks), static_cast<unsigned int>(threads)>>>(d_result, n / 2, bits, counter);
cudaDeviceSynchronize();
// bits : 010101...01
std::cout << "Second run: The number of set bits is " << bits.count() << " (" << n / 2 << " expected; "
<< counter.load() << " of those previously unset)" << std::endl;
destroyDeviceArray<int>(d_input);
destroyDeviceArray<int>(d_result);
stdgpu::bitset<>::destroyDeviceObject(bits);
stdgpu::atomic<int>::destroyDeviceObject(counter);
}
|
7e1796c58ca69c52cfc5b57a1d1bf87ad545a695.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
namespace cv { namespace gpu { namespace device
{
namespace blend
{
template <typename T>
__global__ void blendLinearKernel(int rows, int cols, int cn, const PtrStep<T> img1, const PtrStep<T> img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStep<T> result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < rows && x < cols)
{
int x_ = x / cn;
float w1 = weights1.ptr(y)[x_];
float w2 = weights2.ptr(y)[x_];
T p1 = img1.ptr(y)[x];
T p2 = img2.ptr(y)[x];
result.ptr(y)[x] = (p1 * w1 + p2 * w2) / (w1 + w2 + 1e-5f);
}
}
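// In blendLinearKernel above the weight maps are single-channel, so the interleaved pixel index x is divided by cn before the lookup; the 1e-5f term guards against a zero weight sum.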
template <typename T>
void blendLinearCaller(int rows, int cols, int cn, PtrStep<T> img1, PtrStep<T> img2, PtrStepf weights1, PtrStepf weights2, PtrStep<T> result, hipStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols * cn, threads.x), divUp(rows, threads.y));
hipLaunchKernelGGL(( blendLinearKernel), dim3(grid), dim3(threads), 0, stream, rows, cols * cn, cn, img1, img2, weights1, weights2, result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template void blendLinearCaller<uchar>(int, int, int, PtrStep<uchar>, PtrStep<uchar>, PtrStepf, PtrStepf, PtrStep<uchar>, hipStream_t stream);
template void blendLinearCaller<float>(int, int, int, PtrStep<float>, PtrStep<float>, PtrStepf, PtrStepf, PtrStep<float>, hipStream_t stream);
__global__ void blendLinearKernel8UC4(int rows, int cols, const PtrStepb img1, const PtrStepb img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStepb result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < rows && x < cols)
{
float w1 = weights1.ptr(y)[x];
float w2 = weights2.ptr(y)[x];
float sum_inv = 1.f / (w1 + w2 + 1e-5f);
w1 *= sum_inv;
w2 *= sum_inv;
uchar4 p1 = ((const uchar4*)img1.ptr(y))[x];
uchar4 p2 = ((const uchar4*)img2.ptr(y))[x];
((uchar4*)result.ptr(y))[x] = make_uchar4(p1.x * w1 + p2.x * w2, p1.y * w1 + p2.y * w2,
p1.z * w1 + p2.z * w2, p1.w * w1 + p2.w * w2);
}
}
void blendLinearCaller8UC4(int rows, int cols, PtrStepb img1, PtrStepb img2, PtrStepf weights1, PtrStepf weights2, PtrStepb result, hipStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
hipLaunchKernelGGL(( blendLinearKernel8UC4), dim3(grid), dim3(threads), 0, stream, rows, cols, img1, img2, weights1, weights2, result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
} // namespace blend
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
|
7e1796c58ca69c52cfc5b57a1d1bf87ad545a695.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
namespace cv { namespace gpu { namespace device
{
namespace blend
{
template <typename T>
__global__ void blendLinearKernel(int rows, int cols, int cn, const PtrStep<T> img1, const PtrStep<T> img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStep<T> result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < rows && x < cols)
{
int x_ = x / cn;
float w1 = weights1.ptr(y)[x_];
float w2 = weights2.ptr(y)[x_];
T p1 = img1.ptr(y)[x];
T p2 = img2.ptr(y)[x];
result.ptr(y)[x] = (p1 * w1 + p2 * w2) / (w1 + w2 + 1e-5f);
}
}
template <typename T>
void blendLinearCaller(int rows, int cols, int cn, PtrStep<T> img1, PtrStep<T> img2, PtrStepf weights1, PtrStepf weights2, PtrStep<T> result, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols * cn, threads.x), divUp(rows, threads.y));
blendLinearKernel<<<grid, threads, 0, stream>>>(rows, cols * cn, cn, img1, img2, weights1, weights2, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template void blendLinearCaller<uchar>(int, int, int, PtrStep<uchar>, PtrStep<uchar>, PtrStepf, PtrStepf, PtrStep<uchar>, cudaStream_t stream);
template void blendLinearCaller<float>(int, int, int, PtrStep<float>, PtrStep<float>, PtrStepf, PtrStepf, PtrStep<float>, cudaStream_t stream);
__global__ void blendLinearKernel8UC4(int rows, int cols, const PtrStepb img1, const PtrStepb img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStepb result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < rows && x < cols)
{
float w1 = weights1.ptr(y)[x];
float w2 = weights2.ptr(y)[x];
float sum_inv = 1.f / (w1 + w2 + 1e-5f);
w1 *= sum_inv;
w2 *= sum_inv;
uchar4 p1 = ((const uchar4*)img1.ptr(y))[x];
uchar4 p2 = ((const uchar4*)img2.ptr(y))[x];
((uchar4*)result.ptr(y))[x] = make_uchar4(p1.x * w1 + p2.x * w2, p1.y * w1 + p2.y * w2,
p1.z * w1 + p2.z * w2, p1.w * w1 + p2.w * w2);
}
}
void blendLinearCaller8UC4(int rows, int cols, PtrStepb img1, PtrStepb img2, PtrStepf weights1, PtrStepf weights2, PtrStepb result, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
blendLinearKernel8UC4<<<grid, threads, 0, stream>>>(rows, cols, img1, img2, weights1, weights2, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
} // namespace blend
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
|
fc70ff220f1bd99e716b4af38de06c3e43b68115.hip
|
// !!! This is a file automatically generated by hipify!!!
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
#define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/kernel-utils.cu", line)
#include "awkward/kernel-utils.h"
#include <iostream>
template <typename T>
T awkward_Index_getitem_at_nowrap(const T* ptr, int64_t at) {
T item;
hipMemcpy(&item, &ptr[at], sizeof(T), hipMemcpyDeviceToHost);
return item;
}
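// Each of these accessors performs a synchronous single-element hipMemcpy, which is convenient for scalar lookups but too slow to call in element-wise loops over large arrays.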
int8_t awkward_Index8_getitem_at_nowrap(
const int8_t* ptr,
int64_t at) {
return awkward_Index_getitem_at_nowrap<int8_t>(
ptr,
at);
}
uint8_t awkward_IndexU8_getitem_at_nowrap(
const uint8_t* ptr,
int64_t at) {
return awkward_Index_getitem_at_nowrap<uint8_t>(
ptr,
at);
}
int32_t awkward_Index32_getitem_at_nowrap(
const int32_t* ptr,
int64_t at) {
return awkward_Index_getitem_at_nowrap<int32_t>(
ptr,
at);
}
uint32_t awkward_IndexU32_getitem_at_nowrap(
const uint32_t* ptr,
int64_t at) {
return awkward_Index_getitem_at_nowrap<uint32_t>(
ptr,
at);
}
int64_t awkward_Index64_getitem_at_nowrap(
const int64_t * ptr,
int64_t at) {
return awkward_Index_getitem_at_nowrap<int64_t>(
ptr,
at);
}
template <typename T>
void awkward_Index_setitem_at_nowrap(
const T* ptr,
int64_t at,
T value) {
hipMemcpy((void *) &ptr[at], &value, sizeof(T), hipMemcpyHostToDevice);
}
void awkward_Index8_setitem_at_nowrap(
const int8_t* ptr,
int64_t at,
int8_t value) {
return awkward_Index_setitem_at_nowrap<int8_t>(
ptr,
at,
value);
}
void awkward_IndexU8_setitem_at_nowrap(
const uint8_t* ptr,
int64_t at,
uint8_t value) {
return awkward_Index_setitem_at_nowrap<uint8_t>(
ptr,
at,
value);
}
void awkward_Index32_setitem_at_nowrap(
const int32_t* ptr,
int64_t at,
int32_t value) {
return awkward_Index_setitem_at_nowrap<int32_t>(
ptr,
at,
value);
}
void awkward_IndexU32_setitem_at_nowrap(
const uint32_t* ptr,
int64_t at,
uint32_t value) {
return awkward_Index_setitem_at_nowrap<uint32_t>(
ptr,
at,
value);
}
void awkward_Index64_setitem_at_nowrap(
const int64_t* ptr,
int64_t at,
int64_t value) {
return awkward_Index_setitem_at_nowrap<int64_t>(
ptr,
at,
value);
}
template <typename T>
T awkward_NumpyArray_getitem_at0(const T* ptr) {
T item;
hipMemcpy(&item, &ptr[0], sizeof(T), hipMemcpyDeviceToHost);
return item;
}
bool awkward_NumpyArraybool_getitem_at0(
const bool* ptr) {
return awkward_NumpyArray_getitem_at0<bool>(ptr);
}
int8_t awkward_NumpyArray8_getitem_at0(
const int8_t* ptr) {
return awkward_NumpyArray_getitem_at0<int8_t>(ptr);
}
uint8_t awkward_NumpyArrayU8_getitem_at0(
const uint8_t* ptr) {
return awkward_NumpyArray_getitem_at0<uint8_t>(ptr);
}
int16_t awkward_NumpyArray16_getitem_at0(
const int16_t* ptr) {
return awkward_NumpyArray_getitem_at0<int16_t>(ptr);
}
uint16_t awkward_NumpyArrayU16_getitem_at0(
const uint16_t* ptr) {
return awkward_NumpyArray_getitem_at0<uint16_t>(ptr);
}
int32_t awkward_NumpyArray32_getitem_at0(
const int32_t* ptr) {
return awkward_NumpyArray_getitem_at0<int32_t>(ptr);
}
uint32_t awkward_NumpyArrayU32_getitem_at0(
const uint32_t* ptr) {
return awkward_NumpyArray_getitem_at0<uint32_t>(ptr);
}
int64_t awkward_NumpyArray64_getitem_at0(
const int64_t* ptr) {
return awkward_NumpyArray_getitem_at0<int64_t>(ptr);
}
uint64_t awkward_NumpyArrayU64_getitem_at0(
const uint64_t* ptr) {
return awkward_NumpyArray_getitem_at0<uint64_t>(ptr);
}
float awkward_NumpyArrayfloat32_getitem_at0(
const float* ptr) {
return awkward_NumpyArray_getitem_at0<float>(ptr);
}
double awkward_NumpyArrayfloat64_getitem_at0(
const double* ptr) {
return awkward_NumpyArray_getitem_at0<double>(ptr);
}
|
fc70ff220f1bd99e716b4af38de06c3e43b68115.cu
|
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
#define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/kernel-utils.cu", line)
#include "awkward/kernel-utils.h"
#include <iostream>
template <typename T>
T awkward_Index_getitem_at_nowrap(const T* ptr, int64_t at) {
T item;
cudaMemcpy(&item, &ptr[at], sizeof(T), cudaMemcpyDeviceToHost);
return item;
}
int8_t awkward_Index8_getitem_at_nowrap(
const int8_t* ptr,
int64_t at) {
return awkward_Index_getitem_at_nowrap<int8_t>(
ptr,
at);
}
uint8_t awkward_IndexU8_getitem_at_nowrap(
const uint8_t* ptr,
int64_t at) {
return awkward_Index_getitem_at_nowrap<uint8_t>(
ptr,
at);
}
int32_t awkward_Index32_getitem_at_nowrap(
const int32_t* ptr,
int64_t at) {
return awkward_Index_getitem_at_nowrap<int32_t>(
ptr,
at);
}
uint32_t awkward_IndexU32_getitem_at_nowrap(
const uint32_t* ptr,
int64_t at) {
return awkward_Index_getitem_at_nowrap<uint32_t>(
ptr,
at);
}
int64_t awkward_Index64_getitem_at_nowrap(
const int64_t * ptr,
int64_t at) {
return awkward_Index_getitem_at_nowrap<int64_t>(
ptr,
at);
}
template <typename T>
void awkward_Index_setitem_at_nowrap(
const T* ptr,
int64_t at,
T value) {
cudaMemcpy((void *) &ptr[at], &value, sizeof(T), cudaMemcpyHostToDevice);
}
void awkward_Index8_setitem_at_nowrap(
const int8_t* ptr,
int64_t at,
int8_t value) {
return awkward_Index_setitem_at_nowrap<int8_t>(
ptr,
at,
value);
}
void awkward_IndexU8_setitem_at_nowrap(
const uint8_t* ptr,
int64_t at,
uint8_t value) {
return awkward_Index_setitem_at_nowrap<uint8_t>(
ptr,
at,
value);
}
void awkward_Index32_setitem_at_nowrap(
const int32_t* ptr,
int64_t at,
int32_t value) {
return awkward_Index_setitem_at_nowrap<int32_t>(
ptr,
at,
value);
}
void awkward_IndexU32_setitem_at_nowrap(
const uint32_t* ptr,
int64_t at,
uint32_t value) {
return awkward_Index_setitem_at_nowrap<uint32_t>(
ptr,
at,
value);
}
void awkward_Index64_setitem_at_nowrap(
const int64_t* ptr,
int64_t at,
int64_t value) {
return awkward_Index_setitem_at_nowrap<int64_t>(
ptr,
at,
value);
}
template <typename T>
T awkward_NumpyArray_getitem_at0(const T* ptr) {
T item;
cudaMemcpy(&item, &ptr[0], sizeof(T), cudaMemcpyDeviceToHost);
return item;
}
bool awkward_NumpyArraybool_getitem_at0(
const bool* ptr) {
return awkward_NumpyArray_getitem_at0<bool>(ptr);
}
int8_t awkward_NumpyArray8_getitem_at0(
const int8_t* ptr) {
return awkward_NumpyArray_getitem_at0<int8_t>(ptr);
}
uint8_t awkward_NumpyArrayU8_getitem_at0(
const uint8_t* ptr) {
return awkward_NumpyArray_getitem_at0<uint8_t>(ptr);
}
int16_t awkward_NumpyArray16_getitem_at0(
const int16_t* ptr) {
return awkward_NumpyArray_getitem_at0<int16_t>(ptr);
}
uint16_t awkward_NumpyArrayU16_getitem_at0(
const uint16_t* ptr) {
return awkward_NumpyArray_getitem_at0<uint16_t>(ptr);
}
int32_t awkward_NumpyArray32_getitem_at0(
const int32_t* ptr) {
return awkward_NumpyArray_getitem_at0<int32_t>(ptr);
}
uint32_t awkward_NumpyArrayU32_getitem_at0(
const uint32_t* ptr) {
return awkward_NumpyArray_getitem_at0<uint32_t>(ptr);
}
int64_t awkward_NumpyArray64_getitem_at0(
const int64_t* ptr) {
return awkward_NumpyArray_getitem_at0<int64_t>(ptr);
}
uint64_t awkward_NumpyArrayU64_getitem_at0(
const uint64_t* ptr) {
return awkward_NumpyArray_getitem_at0<uint64_t>(ptr);
}
float awkward_NumpyArrayfloat32_getitem_at0(
const float* ptr) {
return awkward_NumpyArray_getitem_at0<float>(ptr);
}
double awkward_NumpyArrayfloat64_getitem_at0(
const double* ptr) {
return awkward_NumpyArray_getitem_at0<double>(ptr);
}
|
25bb78f58a942441aec31a1f79c2a9ca87236703.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "CTF.cuh"
#include "FFT.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "Optimization.cuh"
#include "Transformation.cuh"
namespace gtom
{
template<int ndims, bool fscweighted> __global__ void WienerPerFreqKernel(tcomplex* d_input, tfloat* d_fsc, tfloat nsr, tcomplex* d_output, tfloat* d_outputweights, int3 dims, CTFParamsLean* d_p);
/////////////////////////////////////////////
//Rectify the CTF envelope depending on SNR//
/////////////////////////////////////////////
void d_CTFWiener(tcomplex* d_input, int3 dimsinput, tfloat* d_fsc, CTFParams* h_params, tcomplex* d_output, tfloat* d_outputweights, uint batch)
{
CTFParamsLean* h_lean = (CTFParamsLean*)malloc(batch * sizeof(CTFParamsLean));
for (uint b = 0; b < batch; b++)
h_lean[b] = CTFParamsLean(h_params[b], dimsinput);
CTFParamsLean* d_lean = (CTFParamsLean*)CudaMallocFromHostArray(h_lean, batch * sizeof(CTFParamsLean));
free(h_lean);
dim3 TpB = dim3(min(128, NextMultipleOf(ElementsFFT2(dimsinput), 32)));
dim3 grid = dim3((ElementsFFT2(dimsinput) + TpB.x - 1) / TpB.x, dimsinput.z, batch);
if (DimensionCount(dimsinput) == 1)
WienerPerFreqKernel<1, true> << <grid, TpB >> > (d_input, d_fsc, 1.0f, d_output, d_outputweights, dimsinput, d_lean);
if (DimensionCount(dimsinput) == 2)
WienerPerFreqKernel<2, true> << <grid, TpB >> > (d_input, d_fsc, 1.0f, d_output, d_outputweights, dimsinput, d_lean);
else if (DimensionCount(dimsinput) == 3)
WienerPerFreqKernel<3, true> << <grid, TpB >> > (d_input, d_fsc, 1.0f, d_output, d_outputweights, dimsinput, d_lean);
hipFree(d_lean);
}
void d_CTFWiener(tcomplex* d_input, int3 dimsinput, tfloat snr, CTFParams* h_params, tcomplex* d_output, tfloat* d_outputweights, uint batch)
{
if (snr <= 0)
throw;
CTFParamsLean* h_lean = (CTFParamsLean*)malloc(batch * sizeof(CTFParamsLean));
for (uint b = 0; b < batch; b++)
h_lean[b] = CTFParamsLean(h_params[b], dimsinput);
CTFParamsLean* d_lean = (CTFParamsLean*)CudaMallocFromHostArray(h_lean, batch * sizeof(CTFParamsLean));
free(h_lean);
dim3 TpB = dim3(min(128, NextMultipleOf(ElementsFFT2(dimsinput), 32)));
dim3 grid = dim3((ElementsFFT2(dimsinput) + TpB.x - 1) / TpB.x, dimsinput.z, batch);
if (DimensionCount(dimsinput) == 1)
WienerPerFreqKernel<1, false> << <grid, TpB >> > (d_input, NULL, 1.0f / snr, d_output, d_outputweights, dimsinput, d_lean);
if (DimensionCount(dimsinput) == 2)
WienerPerFreqKernel<2, false> << <grid, TpB >> > (d_input, NULL, 1.0f / snr, d_output, d_outputweights, dimsinput, d_lean);
else if (DimensionCount(dimsinput) == 3)
WienerPerFreqKernel<3, false> << <grid, TpB >> > (d_input, NULL, 1.0f / snr, d_output, d_outputweights, dimsinput, d_lean);
hipFree(d_lean);
}
////////////////
//CUDA kernels//
////////////////
template<int ndims, bool fscweighted> __global__ void WienerPerFreqKernel(tcomplex* d_input, tfloat* d_fsc, tfloat nsr, tcomplex* d_output, tfloat* d_outputweights, int3 dims, CTFParamsLean* d_p)
{
uint idxy = blockIdx.x * blockDim.x + threadIdx.x;
if (idxy >= ElementsFFT2(dims))
return;
int idx = idxy % ElementsFFT1(dims.x);
uint idy = idxy / ElementsFFT1(dims.x);
uint idz = blockIdx.y;
CTFParamsLean p = d_p[blockIdx.z];
tfloat k, angle, radius;
if (ndims == 1)
{
angle = 0.0;
radius = idx;
k = radius * p.ny;
}
else if (ndims == 2)
{
int y = dims.y - 1 - FFTShift(idy, dims.y) - dims.y / 2;
float2 position = make_float2(-idx, y);
angle = atan2(position.y, position.x);
radius = sqrt(position.x * position.x + position.y * position.y);
float pixelsize = p.pixelsize + p.pixeldelta * cos(2.0f * (angle - p.pixelangle));
k = radius * p.ny / pixelsize;
}
else if (ndims == 3)
{
// No dims.x -... because angle is irrelevant
int y = FFTShift(idy, dims.y) - dims.y / 2;
int z = FFTShift(idz, dims.z) - dims.z / 2;
float3 position = make_float3(idx, y, z);
angle = 0.0;
radius = sqrt(position.x * position.x + position.y * position.y + position.z * position.z);
k = radius * p.ny;
}
{
size_t offset;
if (ndims == 1)
offset = ElementsFFT1(dims.x) * blockIdx.z + idx;
else if (ndims == 2)
offset = ElementsFFT2(dims) * blockIdx.z + getOffset(idx, idy, dims.x / 2 + 1);
else if (ndims == 3)
offset = ElementsFFT(dims) * blockIdx.z + getOffset3(idx, idy, idz, dims.x / 2 + 1, dims.y);
d_input += offset;
d_output += offset;
if (d_outputweights != NULL)
d_outputweights += offset;
if (fscweighted)
d_fsc += dims.x / 2 * blockIdx.z;
}
double amplitude = 1;
tfloat weight = 1;
tcomplex input = *d_input;
if (radius > 0)
{
amplitude = d_GetCTF<false, false>(k, angle, 0, p);
if (fscweighted)
{
// Linear interpolation over the FSC curve
tfloat fsc = abs(lerp(d_fsc[min(dims.x / 2 - 1, (int)radius)], d_fsc[min(dims.x / 2 - 1, (int)radius + 1)], radius - floor(radius)));
// FSC too small, avoid numerical error in division later
if (fsc < 1e-6f)
{
*d_output = make_cuComplex(0, 0);
if (d_outputweights != NULL)
*d_outputweights = 0;
return;
}
// FSC significant enough, SNR = FSC / (1 - FSC), but Wiener needs 1/SNR
else
weight = amplitude / (amplitude * amplitude + (1.0f - fsc) / fsc);
}
else
{
weight = amplitude / (amplitude * amplitude + nsr);
}
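// Example: amplitude (CTF) = 0.5 and nsr = 1/SNR = 0.1 give weight = 0.5 / (0.25 + 0.1) ~ 1.43; as nsr -> 0 the weight tends to 1/CTF (plain inversion), while a large nsr damps the correction.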
//weight = amplitude < 0.0f ? 1.0f : 1.0f;
}
*d_output = make_cuComplex(input.x * weight, input.y * weight);
//*d_output = make_cuComplex(amplitude, 0.0f);
if (d_outputweights != NULL)
*d_outputweights = amplitude * weight;
}
}
|
25bb78f58a942441aec31a1f79c2a9ca87236703.cu
|
#include "Prerequisites.cuh"
#include "CTF.cuh"
#include "FFT.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "Optimization.cuh"
#include "Transformation.cuh"
namespace gtom
{
template<int ndims, bool fscweighted> __global__ void WienerPerFreqKernel(tcomplex* d_input, tfloat* d_fsc, tfloat nsr, tcomplex* d_output, tfloat* d_outputweights, int3 dims, CTFParamsLean* d_p);
/////////////////////////////////////////////
//Rectify the CTF envelope depending on SNR//
/////////////////////////////////////////////
void d_CTFWiener(tcomplex* d_input, int3 dimsinput, tfloat* d_fsc, CTFParams* h_params, tcomplex* d_output, tfloat* d_outputweights, uint batch)
{
CTFParamsLean* h_lean = (CTFParamsLean*)malloc(batch * sizeof(CTFParamsLean));
for (uint b = 0; b < batch; b++)
h_lean[b] = CTFParamsLean(h_params[b], dimsinput);
CTFParamsLean* d_lean = (CTFParamsLean*)CudaMallocFromHostArray(h_lean, batch * sizeof(CTFParamsLean));
free(h_lean);
dim3 TpB = dim3(min(128, NextMultipleOf(ElementsFFT2(dimsinput), 32)));
dim3 grid = dim3((ElementsFFT2(dimsinput) + TpB.x - 1) / TpB.x, dimsinput.z, batch);
if (DimensionCount(dimsinput) == 1)
WienerPerFreqKernel<1, true> << <grid, TpB >> > (d_input, d_fsc, 1.0f, d_output, d_outputweights, dimsinput, d_lean);
if (DimensionCount(dimsinput) == 2)
WienerPerFreqKernel<2, true> << <grid, TpB >> > (d_input, d_fsc, 1.0f, d_output, d_outputweights, dimsinput, d_lean);
else if (DimensionCount(dimsinput) == 3)
WienerPerFreqKernel<3, true> << <grid, TpB >> > (d_input, d_fsc, 1.0f, d_output, d_outputweights, dimsinput, d_lean);
cudaFree(d_lean);
}
void d_CTFWiener(tcomplex* d_input, int3 dimsinput, tfloat snr, CTFParams* h_params, tcomplex* d_output, tfloat* d_outputweights, uint batch)
{
if (snr <= 0)
throw;
CTFParamsLean* h_lean = (CTFParamsLean*)malloc(batch * sizeof(CTFParamsLean));
for (uint b = 0; b < batch; b++)
h_lean[b] = CTFParamsLean(h_params[b], dimsinput);
CTFParamsLean* d_lean = (CTFParamsLean*)CudaMallocFromHostArray(h_lean, batch * sizeof(CTFParamsLean));
free(h_lean);
dim3 TpB = dim3(min(128, NextMultipleOf(ElementsFFT2(dimsinput), 32)));
dim3 grid = dim3((ElementsFFT2(dimsinput) + TpB.x - 1) / TpB.x, dimsinput.z, batch);
if (DimensionCount(dimsinput) == 1)
WienerPerFreqKernel<1, false> << <grid, TpB >> > (d_input, NULL, 1.0f / snr, d_output, d_outputweights, dimsinput, d_lean);
if (DimensionCount(dimsinput) == 2)
WienerPerFreqKernel<2, false> << <grid, TpB >> > (d_input, NULL, 1.0f / snr, d_output, d_outputweights, dimsinput, d_lean);
else if (DimensionCount(dimsinput) == 3)
WienerPerFreqKernel<3, false> << <grid, TpB >> > (d_input, NULL, 1.0f / snr, d_output, d_outputweights, dimsinput, d_lean);
cudaFree(d_lean);
}
////////////////
//CUDA kernels//
////////////////
template<int ndims, bool fscweighted> __global__ void WienerPerFreqKernel(tcomplex* d_input, tfloat* d_fsc, tfloat nsr, tcomplex* d_output, tfloat* d_outputweights, int3 dims, CTFParamsLean* d_p)
{
uint idxy = blockIdx.x * blockDim.x + threadIdx.x;
if (idxy >= ElementsFFT2(dims))
return;
int idx = idxy % ElementsFFT1(dims.x);
uint idy = idxy / ElementsFFT1(dims.x);
uint idz = blockIdx.y;
CTFParamsLean p = d_p[blockIdx.z];
tfloat k, angle, radius;
if (ndims == 1)
{
angle = 0.0;
radius = idx;
k = radius * p.ny;
}
else if (ndims == 2)
{
int y = dims.y - 1 - FFTShift(idy, dims.y) - dims.y / 2;
float2 position = make_float2(-idx, y);
angle = atan2(position.y, position.x);
radius = sqrt(position.x * position.x + position.y * position.y);
float pixelsize = p.pixelsize + p.pixeldelta * cos(2.0f * (angle - p.pixelangle));
k = radius * p.ny / pixelsize;
}
else if (ndims == 3)
{
// No dims.x -... because angle is irrelevant
int y = FFTShift(idy, dims.y) - dims.y / 2;
int z = FFTShift(idz, dims.z) - dims.z / 2;
float3 position = make_float3(idx, y, z);
angle = 0.0;
radius = sqrt(position.x * position.x + position.y * position.y + position.z * position.z);
k = radius * p.ny;
}
{
size_t offset;
if (ndims == 1)
offset = ElementsFFT1(dims.x) * blockIdx.z + idx;
else if (ndims == 2)
offset = ElementsFFT2(dims) * blockIdx.z + getOffset(idx, idy, dims.x / 2 + 1);
else if (ndims == 3)
offset = ElementsFFT(dims) * blockIdx.z + getOffset3(idx, idy, idz, dims.x / 2 + 1, dims.y);
d_input += offset;
d_output += offset;
if (d_outputweights != NULL)
d_outputweights += offset;
if (fscweighted)
d_fsc += dims.x / 2 * blockIdx.z;
}
double amplitude = 1;
tfloat weight = 1;
tcomplex input = *d_input;
if (radius > 0)
{
amplitude = d_GetCTF<false, false>(k, angle, 0, p);
if (fscweighted)
{
// Linear interpolation over the FSC curve
tfloat fsc = abs(lerp(d_fsc[min(dims.x / 2 - 1, (int)radius)], d_fsc[min(dims.x / 2 - 1, (int)radius + 1)], radius - floor(radius)));
// FSC too small, avoid numerical error in division later
if (fsc < 1e-6f)
{
*d_output = make_cuComplex(0, 0);
if (d_outputweights != NULL)
*d_outputweights = 0;
return;
}
// FSC significant enough, SNR = FSC / (1 - FSC), but Wiener needs 1/SNR
else
weight = amplitude / (amplitude * amplitude + (1.0f - fsc) / fsc);
}
else
{
weight = amplitude / (amplitude * amplitude + nsr);
}
//weight = amplitude < 0.0f ? 1.0f : 1.0f;
}
*d_output = make_cuComplex(input.x * weight, input.y * weight);
//*d_output = make_cuComplex(amplitude, 0.0f);
if (d_outputweights != NULL)
*d_outputweights = amplitude * weight;
}
}
|
31e9ec13e9a97612dbbf38c6486f7e975b2c5bfa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float* var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10) {
for (int i=0; i < var_1; ++i) {
var_2[i] = -1.7352E-36f;
comp = var_2[i] + +0.0f * var_3 + -0.0f;
comp += -1.2258E10f - (var_4 + log10f(var_5 - +1.8246E-42f / (var_6 - (var_7 * var_8 - var_9))));
comp += var_10 / -0.0f;
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float* tmp_3 = initPointer( atof(argv[3]) );
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11);
hipDeviceSynchronize();
return 0;
}
|
31e9ec13e9a97612dbbf38c6486f7e975b2c5bfa.cu
|
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float* var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10) {
for (int i=0; i < var_1; ++i) {
var_2[i] = -1.7352E-36f;
comp = var_2[i] + +0.0f * var_3 + -0.0f;
comp += -1.2258E10f - (var_4 + log10f(var_5 - +1.8246E-42f / (var_6 - (var_7 * var_8 - var_9))));
comp += var_10 / -0.0f;
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float* tmp_3 = initPointer( atof(argv[3]) );
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11);
cudaDeviceSynchronize();
return 0;
}
|
ff51b23ee1740dc3e8f727dc5ae21fb9273eb4b7.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <common.h>
namespace cuda
{
void pi_init()
{
/*
TODO any initialization code you need goes here, e.g. random
number seeding, hipMalloc allocations, etc. Random number
_generation_ should still go in pi().
*/
}
double pi()
{
/*
TODO Put your code here. You can use anything in the CUDA
Toolkit, including libraries, Thrust, or your own device
kernels, but do not use ArrayFire functions here. If you have
initialization code, see pi_init().
*/
return 0;
}
void pi_reset()
{
/*
TODO This function should contain the clean up. You should add
memory deallocation etc here.
*/
}
}
|
ff51b23ee1740dc3e8f727dc5ae21fb9273eb4b7.cu
|
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <common.h>
namespace cuda
{
void pi_init()
{
/*
TODO any initialization code you need goes here, e.g. random
number seeding, cudaMalloc allocations, etc. Random number
_generation_ should still go in pi().
*/
}
double pi()
{
/*
TODO Put your code here. You can use anything in the CUDA
Toolkit, including libraries, Thrust, or your own device
kernels, but do not use ArrayFire functions here. If you have
initialization code, see pi_init().
*/
return 0;
}
void pi_reset()
{
/*
TODO This function should contain the clean up. You should add
memory deallocation etc here.
*/
}
}
|
b80f269bdef28247a9a540f6b5dc2803249e7430.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Grid.h"
#include <cassert>
#include <cstdio>
#define MATRIX_EPSILON 1e-6
__host__ __device__ double bspline(double x) {
x = fabs(x);
double w;
if (x < 1)
w = x * x * (x / 2 - 1) + 2 / 3.0;
else if (x < 2)
w = x * (x * (-x / 6 + 1) - 2) + 4 / 3.0;
else return 0;
return w;
}
//Slope of interpolation function
__host__ __device__ double bsplineSlope(double x) {
double abs_x = fabs(x);
if (abs_x < 1)
return 1.5 * x * abs_x - 2 * x;
else if (x < 2)
return -x * abs_x / 2 + 2 * x - 2 * x / abs_x;
else return 0;
}
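//Sanity check for the interpolation weights: bspline(0) = 2/3, bspline(1) = 1/6, bspline(2) = 0, and for any fractional offset f in [0,1) the four weights at distances f+1, f, 1-f, 2-f sum to 1 (partition of unity), e.g. f = 0.5 gives 1/48 + 23/48 + 23/48 + 1/48 = 1.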
__host__ __device__ Matrix2D outer_product(Vector2D& a, Vector2D& b) {
return Matrix2D(a.x * b.x, a.x * b.y, a.y * b.x, a.y * b.y);
}
__host__ __device__ void polarDecomp(Matrix2D m, Matrix2D& R, Matrix2D& S) {
auto x = m(0, 0) + m(1, 1);
auto y = m(1, 0) - m(0, 1);
auto scale = 1.0 / sqrt(x * x + y * y);
auto c = x * scale, s = y * scale;
R(0, 0) = c;
R(0, 1) = -s;
R(1, 0) = s;
R(1, 1) = c;
S = R.T() * m;
}
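//Quick check: for a pure rotation m = [c -s; s c] we get x = 2c, y = 2s, so R reproduces m and S = R^T * m = I.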
__host__ __device__ void svd(Matrix2D m, Matrix2D& U, Matrix2D& sig, Matrix2D& V){
if (fabs(m(1, 0) - m(0, 1)) < MATRIX_EPSILON && fabs(m(1, 0)) < MATRIX_EPSILON) {
U = Matrix2D(m(0, 0) < 0 ? -1 : 1, 0, 0, m(1, 1) < 0 ? -1 : 1);
sig(0, 0) = fabs(m(0, 0)), sig(1, 1) = fabs(m(1, 1));
V = Matrix2D();
}
else {
double j = m(0, 0) * m(0, 0) + m(1, 0) * m(1, 0);
double k = m(0, 1) * m(0, 1) + m(1, 1) * m(1, 1);
double v_c = m(0, 0) * m(0, 1) + m(1, 0) * m(1, 1);
if (fabs(v_c) < MATRIX_EPSILON) {
double s1 = sqrt(j);
double s2 = fabs(j - k) < MATRIX_EPSILON ? s1 : sqrt(k);
sig(0, 0) = s1, sig(1, 1) = s2;
V = Matrix2D();
U = Matrix2D(m(0, 0) / s1, m(0, 1) / s2, m(1, 0) / s1, m(1, 1) / s2);
}
else {
double jmk = j - k,
jpk = j + k,
root = sqrt(jmk * jmk + 4 * v_c * v_c),
eig = (jpk + root) / 2,
s1 = sqrt(eig),
s2 = fabs(root) < MATRIX_EPSILON ? s1 : sqrt((jpk - root) / 2);
sig(0, 0) = s1, sig(1, 1) = s2;
double v_s = eig - j,
len = sqrt(v_s * v_s + v_c * v_c);
v_c /= len;
v_s /= len;
V = Matrix2D(v_c, -v_s, v_s, v_c);
U = Matrix2D(
(m(0, 0) * v_c + m(0, 1) * v_s) / s1,
(m(0, 1) * v_c - m(0, 0) * v_s) / s2,
(m(1, 0) * v_c + m(1, 1) * v_s) / s1,
(m(1, 1) * v_c - m(1, 0) * v_s) / s2
);
}
}
}
__device__ double my_atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
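//Emulates atomicAdd on double with a 64-bit compare-and-swap loop; native double-precision atomicAdd only exists on devices of compute capability 6.0 and newer, so this keeps the code portable to older GPUs.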
struct OBCmp {
__host__ __device__
bool operator()(const SpGrid& o1, const SpGrid& o2) {
return o1.node_id< o2.node_id;
}
};
__host__ void Grid::initGridMassVel() {
// Map particle to grid
Node* grid_ptr = thrust::raw_pointer_cast(&nodes[0]);
auto func = [=] __device__ (Particle & p) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
// get the index of the grid cross point corresponding to the particle (it is on the bottom left of the particle)
p.grid_p = (p.pos - _origin) / _node_size;
int p_x = (int)p.grid_p.x; // x coord index in grid
int p_y = (int)p.grid_p.y; // y coord index in grid
// Map from (p_x - 1, p_y - 1) to (p_x + 2, p_y + 2)
// The origin is bottom left, which means node_id = y * size.x + x
for (int it = 0, y = p_y - 1; y <= p_y + 2; ++y) {
if (y < 0 || y >= _size.y) // here size.y has already been added by 1
continue;
// Y interpolation
double weight_y = bspline(p.grid_p.y - y);
double dy = bsplineSlope(p.grid_p.y - y);
for (int x = p_x - 1; x <= p_x + 2; ++x, ++it) {
if (x < 0 || x >= _size.x)
continue;
// X interpolation
double weight_x = bspline(p.grid_p.x - x);
double dx = bsplineSlope(p.grid_p.x - x);
// set weight of particles related nodes
double w = weight_x * weight_y;
p.weights[it] = w;
// set weight gradient
p.weight_gradient[it] = Vector2D(dx * weight_y, dy * weight_x);
p.weight_gradient[it] /= _node_size;
// set node weighted mass and velocity
int node_id = int(y * _size.x + x);
//nodes[node_id].mass += w * p.mass;
//my_atomicAdd(&(grid_ptr[node_id].mass), w * p.mass);
//nodes[node_id].vel += p.vel * w * p.mass;
Vector2D temp = p.vel * w * p.mass;
//my_atomicAdd(&(grid_ptr[node_id].vel.x), temp.x);
//my_atomicAdd(&(grid_ptr[node_id].vel.y), temp.y);
//nodes[node_id].active = true;
//atomicAdd(&(grid_ptr[node_id].active), 1);
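//NOTE: the accumulation into grid_ptr (mass, velocity, active flag) is commented out in this source, so as written this pass only fills each particle's weights and weight gradients.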
}
}
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
//cout << other_particles.size() << endl;
thrust::sort(thrust::device, other_particles.begin(), other_particles.end(), OBCmp());
thrust::for_each(
thrust::device,
nodes.begin(),
nodes.end(),
[=] __device__(Node& n) {
if (n.active)
n.vel /= n.mass;
}
);
}
// Calculate particles' volumes
__host__ void Grid::initVolumes() {
Node* grid_ptr = thrust::raw_pointer_cast(&nodes[0]);
auto func = [=] __device__(Particle & p) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
int p_x = (int)p.grid_p.x;
int p_y = (int)p.grid_p.y;
p.density = 0;
for (int it = 0, y = p_y - 1; y <= p_y + 2; ++y) {
for (int x = p_x - 1; x <= p_x + 2; ++x, ++it) {
if (y < 0 || y >= _size.y || x < 0 || x >= _size.x)
continue;
double w = p.weights[it];
int node_id = int(y * _size.x + x);
if (w > BSPLINE_EPSILON) {
p.density += w * grid_ptr[node_id].mass;
}
}
}
p.density /= _node_area;
p.volume = p.mass / p.density;
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
}
//Calculate the grid velocity for the next timestep
__host__ void Grid::computeForce() {
Node* grid_ptr = thrust::raw_pointer_cast(&nodes[0]);
auto func = [=] __device__(Particle & p) {
// First calculate force based on mpmcourse
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
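//The block below evaluates the volume-scaled stress of the snow constitutive model: 2*mu*(Fe - R)*Fe^T + lambda*Je*(Je - 1)*I, where R = U*V^T comes from the SVD of Fe and the hardening factor e = exp(HARDENING*(1 - det(Fp))) scales both Lame parameters.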
Matrix2D I;
Matrix2D U, Sig, V;
svd(p.elastic_deformation, U, Sig, V);
double e = ::exp(HARDENING * (1.0f - p.plastic_deformation.det()));
double lambda = LAMBDA * e;
double mu = MU * e;
double Je = Sig.det();
Matrix2D temp = (p.elastic_deformation - U * V.T()) * p.elastic_deformation.T() * 2 * mu + I * lambda * Je * (Je - 1);
temp = temp * p.volume;
// accumulate particle stress to grids
int p_x = (int)p.grid_p.x;
int p_y = (int)p.grid_p.y;
for (int it = 0, y = p_y - 1; y <= p_y + 2; ++y) {
for (int x = p_x - 1; x <= p_x + 2; ++x, ++it) {
if (y < 0 || y >= _size.y || x < 0 || x >= _size.x)
continue;
double w = p.weights[it];
int node_id = int(y * _size.x + x);
//Node& node = nodes[node_id];
if (w > BSPLINE_EPSILON) {
//grid_ptr[node_id].force -= temp * p.weight_gradient[it];
Vector2D value = temp * p.weight_gradient[it];
//my_atomicAdd(&(grid_ptr[node_id].force.x), -value.x);
//my_atomicAdd(&(grid_ptr[node_id].force.y), -value.y);
}
}
}
};
thrust::sort(thrust::device, other_particles.begin(), other_particles.end(), OBCmp());
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
}
__host__ void Grid::updateVelocity() {
// here is how we update grid velocity
thrust::for_each(
thrust::device,
nodes.begin(),
nodes.end(),
[=] __device__(Node& n) {
double timestep = 0.0001;
Vector2D gravity(0, -9.8);
if (n.active) {
n.vel_new = n.vel + timestep * (gravity + n.force / n.mass);
//printf("updated!\n");
}
}
);
collisionGrid();
}
__host__ void Grid::updateDeformation() {
Node* grid_ptr = thrust::raw_pointer_cast(&nodes[0]);
auto func = [=] __device__(Particle & p) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
int p_x = (int)p.grid_p.x;
int p_y = (int)p.grid_p.y;
p.velocity_gradient = Matrix2D(0, 0, 0, 0);
for (int it = 0, y = p_y - 1; y <= p_y + 2; ++y) {
for (int x = p_x - 1; x <= p_x + 2; ++x, ++it) {
if (y < 0 || y >= _size.y || x < 0 || x >= _size.x)
continue;
double temp = p.weights[it];
Vector2D delta_w = p.weight_gradient[it];
int node_id = int(y * _size.x + x);
if (temp > BSPLINE_EPSILON) {
p.velocity_gradient += outer_product(grid_ptr[node_id].vel_new, delta_w);
}
}
}
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
auto func2 = [=] __device__(Particle & p) {
Matrix2D I = Matrix2D();
p.elastic_deformation = (I + p.velocity_gradient * TIMESTEP) * p.elastic_deformation;
p.deformation_gradient = p.elastic_deformation * p.plastic_deformation;
Matrix2D U, Sig, V;
svd(p.elastic_deformation, U, Sig, V);
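//Clamp the singular values of Fe to [CRIT_COMPRESS, CRIT_STRETCH] and push the excess deformation into Fp, so the total deformation gradient F = Fe * Fp is preserved.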
for (int idx = 0; idx < 2; ++idx) {
if (Sig(idx, idx) < CRIT_COMPRESS) {
Sig(idx, idx) = CRIT_COMPRESS;
}
else if (Sig(idx, idx) > CRIT_STRETCH) {
Sig(idx, idx) = CRIT_STRETCH;
}
}
Matrix2D Sig_inv(1.0 / Sig(0, 0), 0, 0, 1.0 / Sig(1, 1));
p.elastic_deformation = U * Sig * V.T();
p.plastic_deformation = V * Sig_inv * U.T() * p.deformation_gradient;
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func2);
}
// Map back to particles
__host__ void Grid::updateParticlesVelocity() {
Node* grid_ptr = thrust::raw_pointer_cast(&nodes[0]);
auto func = [=] __device__(Particle & p) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
int p_x = (int)p.grid_p.x;
int p_y = (int)p.grid_p.y;
p.density = 0;
Vector2D v_pic, v_flip = p.vel;
for (int it = 0, y = p_y - 1; y <= p_y + 2; ++y) {
for (int x = p_x - 1; x <= p_x + 2; ++x, ++it) {
if (y < 0 || y >= _size.y || x < 0 || x >= _size.x)
continue;
double w = p.weights[it];
int node_id = int(y * _size.x + x);
if (w > BSPLINE_EPSILON) {
//Node& node = nodes[node_id];
v_pic += grid_ptr[node_id].vel_new * w;
v_flip += (grid_ptr[node_id].vel_new - grid_ptr[node_id].vel) * w;
p.density += w * grid_ptr[node_id].mass;
}
}
}
double flip_percent = .95;
p.vel = v_flip * flip_percent + v_pic * (1 - flip_percent);
p.density /= _node_area;
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
collisionParticles();
}
__host__ void Grid::updateParticlesPosition() {
auto func = [=] __device__(Particle & p) {
double timestep = 0.0001;
p.pos += timestep * p.vel;
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
}
__host__ void Grid::collisionGrid() {
auto func = [=] __device__(Node & n) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
double timestep = 0.0001;
if (n.active) {
Vector2D delta_scale = Vector2D(timestep, timestep);
delta_scale /= _node_size;
Vector2D new_pos = n.vel_new * delta_scale + n.pos;
if (new_pos.x < BSPLINE_RADIUS || new_pos.x > _size.x - BSPLINE_RADIUS - 1) {
n.vel_new.x = 0;
n.vel_new.y *= STICKY;
}
if (new_pos.y < BSPLINE_RADIUS || new_pos.y > _size.y - BSPLINE_RADIUS - 1) {
n.vel_new.x *= STICKY;
n.vel_new.y = 0;
}
}
};
thrust::for_each(thrust::device, nodes.begin(), nodes.end(), func);
}
__host__ void Grid::collisionParticles() {
auto func = [=] __device__(Particle & p) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
double timestep = 0.0001;
Vector2D delta_scale = Vector2D(timestep, timestep);
delta_scale /= _node_size;
Vector2D new_pos = p.grid_p + p.vel * delta_scale;
if (new_pos.x < BSPLINE_RADIUS - 1 || new_pos.x > _size.x - BSPLINE_RADIUS) {
p.vel.x *= -STICKY;
}
if (new_pos.y < BSPLINE_RADIUS - 1 || new_pos.y > _size.y - BSPLINE_RADIUS) {
p.vel.y *= -STICKY;
}
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
}
|
b80f269bdef28247a9a540f6b5dc2803249e7430.cu
|
#include "Grid.h"
#include <cassert>
#include <cstdio>
#define MATRIX_EPSILON 1e-6
__host__ __device__ double bspline(double x) {
x = fabs(x);
double w;
if (x < 1)
w = x * x * (x / 2 - 1) + 2 / 3.0;
else if (x < 2)
w = x * (x * (-x / 6 + 1) - 2) + 4 / 3.0;
else return 0;
return w;
}
//Slope of interpolation function
__host__ __device__ double bsplineSlope(double x) {
double abs_x = fabs(x);
if (abs_x < 1)
return 1.5 * x * abs_x - 2 * x;
	else if (abs_x < 2) // mirror bspline's support: slope must be zero for |x| >= 2
return -x * abs_x / 2 + 2 * x - 2 * x / abs_x;
else return 0;
}
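// The two helpers above are the cubic B-spline kernel and its derivative used for the
// particle/grid transfers. The helper below is a purely illustrative host-side check
// (an assumption of this sketch, not project API): a central difference of bspline()
// should agree with bsplineSlope() away from the knots.
__host__ inline double bsplineSlopeNumeric(double x, double h = 1e-5) {
	// Central-difference approximation of d/dx bspline(x).
	return (bspline(x + h) - bspline(x - h)) / (2.0 * h);
}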
__host__ __device__ Matrix2D outer_product(Vector2D& a, Vector2D& b) {
return Matrix2D(a.x * b.x, a.x * b.y, a.y * b.x, a.y * b.y);
}
__host__ __device__ void polarDecomp(Matrix2D m, Matrix2D& R, Matrix2D& S) {
auto x = m(0, 0) + m(1, 1);
auto y = m(1, 0) - m(0, 1);
auto scale = 1.0 / sqrt(x * x + y * y);
auto c = x * scale, s = y * scale;
R(0, 0) = c;
R(0, 1) = -s;
R(1, 0) = s;
R(1, 1) = c;
S = R.T() * m;
}
__host__ __device__ void svd(Matrix2D m, Matrix2D& U, Matrix2D& sig, Matrix2D& V){
if (fabs(m(1, 0) - m(0, 1)) < MATRIX_EPSILON && fabs(m(1, 0)) < MATRIX_EPSILON) {
U = Matrix2D(m(0, 0) < 0 ? -1 : 1, 0, 0, m(1, 1) < 0 ? -1 : 1);
sig(0, 0) = fabs(m(0, 0)), sig(1, 1) = fabs(m(1, 1));
V = Matrix2D();
}
else {
double j = m(0, 0) * m(0, 0) + m(1, 0) * m(1, 0);
double k = m(0, 1) * m(0, 1) + m(1, 1) * m(1, 1);
double v_c = m(0, 0) * m(0, 1) + m(1, 0) * m(1, 1);
if (fabs(v_c) < MATRIX_EPSILON) {
double s1 = sqrt(j);
double s2 = fabs(j - k) < MATRIX_EPSILON ? s1 : sqrt(k);
sig(0, 0) = s1, sig(1, 1) = s2;
V = Matrix2D();
U = Matrix2D(m(0, 0) / s1, m(0, 1) / s2, m(1, 0) / s1, m(1, 1) / s2);
}
else {
double jmk = j - k,
jpk = j + k,
root = sqrt(jmk * jmk + 4 * v_c * v_c),
eig = (jpk + root) / 2,
s1 = sqrt(eig),
s2 = fabs(root) < MATRIX_EPSILON ? s1 : sqrt((jpk - root) / 2);
sig(0, 0) = s1, sig(1, 1) = s2;
double v_s = eig - j,
len = sqrt(v_s * v_s + v_c * v_c);
v_c /= len;
v_s /= len;
V = Matrix2D(v_c, -v_s, v_s, v_c);
U = Matrix2D(
(m(0, 0) * v_c + m(0, 1) * v_s) / s1,
(m(0, 1) * v_c - m(0, 0) * v_s) / s2,
(m(1, 0) * v_c + m(1, 1) * v_s) / s1,
(m(1, 1) * v_c - m(1, 0) * v_s) / s2
);
}
}
}
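// Illustrative check for the 2x2 SVD above (a sketch-only helper, not called by the project):
// the reconstruction U * Sig * V^T should reproduce the input within a small tolerance.
// Only Matrix2D operations already used in this file (operator*, T(), operator()) are relied on.
__host__ __device__ inline bool svdReconstructs(Matrix2D m, Matrix2D U, Matrix2D Sig, Matrix2D V, double tol) {
	Matrix2D r = U * Sig * V.T();
	for (int i = 0; i < 2; ++i)
		for (int j = 0; j < 2; ++j)
			if (fabs(r(i, j) - m(i, j)) > tol)
				return false;
	return true;
}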
__device__ double my_atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
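// my_atomicAdd above emulates a double-precision atomicAdd via atomicCAS, which is required on
// architectures without native double atomics. The sketch below shows the intended usage when
// scattering particle mass onto grid nodes; scatterMassExample is a name invented for this
// illustration and is not part of the project's API.
__device__ inline void scatterMassExample(Node* grid_ptr, int node_id, double w, double particle_mass) {
	// Many particles may map to the same node, so the accumulation must be atomic.
	my_atomicAdd(&(grid_ptr[node_id].mass), w * particle_mass);
}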
struct OBCmp {
__host__ __device__
bool operator()(const SpGrid& o1, const SpGrid& o2) {
		return o1.node_id < o2.node_id;
}
};
__host__ void Grid::initGridMassVel() {
// Map particle to grid
Node* grid_ptr = thrust::raw_pointer_cast(&nodes[0]);
auto func = [=] __device__ (Particle & p) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
// get the index of the grid cross point corresponding to the particle (it is on the bottom left of the particle)
p.grid_p = (p.pos - _origin) / _node_size;
int p_x = (int)p.grid_p.x; // x coord index in grid
int p_y = (int)p.grid_p.y; // y coord index in grid
// Map from (p_x - 1, p_y - 1) to (p_x + 2, p_y + 2)
// The origin is bottom left, which means node_id = y * size.x + x
for (int it = 0, y = p_y - 1; y <= p_y + 2; ++y) {
if (y < 0 || y >= _size.y) // here size.y has already been added by 1
continue;
// Y interpolation
double weight_y = bspline(p.grid_p.y - y);
double dy = bsplineSlope(p.grid_p.y - y);
for (int x = p_x - 1; x <= p_x + 2; ++x, ++it) {
if (x < 0 || x >= _size.x)
continue;
// X interpolation
double weight_x = bspline(p.grid_p.x - x);
double dx = bsplineSlope(p.grid_p.x - x);
// set weight of particles related nodes
double w = weight_x * weight_y;
p.weights[it] = w;
// set weight gradient
p.weight_gradient[it] = Vector2D(dx * weight_y, dy * weight_x);
p.weight_gradient[it] /= _node_size;
// set node weighted mass and velocity
				int node_id = int(y * _size.x + x);
				// Scatter weighted mass and momentum into the node. Several particles can
				// touch the same node concurrently, so use the CAS-based double atomic.
				my_atomicAdd(&(grid_ptr[node_id].mass), w * p.mass);
				Vector2D temp = p.vel * w * p.mass;
				my_atomicAdd(&(grid_ptr[node_id].vel.x), temp.x);
				my_atomicAdd(&(grid_ptr[node_id].vel.y), temp.y);
				// Every contributing thread writes the same value, so a plain store suffices here.
				grid_ptr[node_id].active = true;
}
}
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
//cout << other_particles.size() << endl;
thrust::sort(thrust::device, other_particles.begin(), other_particles.end(), OBCmp());
thrust::for_each(
thrust::device,
nodes.begin(),
nodes.end(),
[=] __device__(Node& n) {
if (n.active)
n.vel /= n.mass;
}
);
}
// Calculate particles' volumes (density from grid mass, then volume = mass / density)
__host__ void Grid::initVolumes() {
Node* grid_ptr = thrust::raw_pointer_cast(&nodes[0]);
auto func = [=] __device__(Particle & p) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
int p_x = (int)p.grid_p.x;
int p_y = (int)p.grid_p.y;
p.density = 0;
for (int it = 0, y = p_y - 1; y <= p_y + 2; ++y) {
for (int x = p_x - 1; x <= p_x + 2; ++x, ++it) {
if (y < 0 || y >= _size.y || x < 0 || x >= _size.x)
continue;
double w = p.weights[it];
int node_id = int(y * _size.x + x);
if (w > BSPLINE_EPSILON) {
p.density += w * grid_ptr[node_id].mass;
}
}
}
p.density /= _node_area;
p.volume = p.mass / p.density;
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
}
// Compute per-node forces from particle stresses; the next-timestep grid velocity is advanced in updateVelocity()
__host__ void Grid::computeForce() {
Node* grid_ptr = thrust::raw_pointer_cast(&nodes[0]);
auto func = [=] __device__(Particle & p) {
// First calculate force based on mpmcourse
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
Matrix2D I;
Matrix2D U, Sig, V;
svd(p.elastic_deformation, U, Sig, V);
		double e = exp(HARDENING * (1.0 - p.plastic_deformation.det())); // device code: use CUDA's exp rather than std::exp
double lambda = LAMBDA * e;
double mu = MU * e;
double Je = Sig.det();
Matrix2D temp = (p.elastic_deformation - U * V.T()) * p.elastic_deformation.T() * 2 * mu + I * lambda * Je * (Je - 1);
temp = temp * p.volume;
// accumulate particle stress to grids
int p_x = (int)p.grid_p.x;
int p_y = (int)p.grid_p.y;
for (int it = 0, y = p_y - 1; y <= p_y + 2; ++y) {
for (int x = p_x - 1; x <= p_x + 2; ++x, ++it) {
if (y < 0 || y >= _size.y || x < 0 || x >= _size.x)
continue;
double w = p.weights[it];
int node_id = int(y * _size.x + x);
//Node& node = nodes[node_id];
if (w > BSPLINE_EPSILON) {
					// Scatter the particle's (negative) stress contribution to the node force
					// atomically, since several particles can map to the same node.
					Vector2D value = temp * p.weight_gradient[it];
					my_atomicAdd(&(grid_ptr[node_id].force.x), -value.x);
					my_atomicAdd(&(grid_ptr[node_id].force.y), -value.y);
}
}
}
};
thrust::sort(thrust::device, other_particles.begin(), other_particles.end(), OBCmp());
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
}
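// For reference, the stress assembled inside computeForce is the fixed-corotated model
//   sigma(Fe) = 2*mu*(Fe - R)*Fe^T + lambda*Je*(Je - 1)*I,  with R = U*V^T and Je = det(Sig),
// scaled by the particle volume before being scattered to the grid. The helper below restates
// that formula as a standalone sketch for comparison; it is illustrative only and not called
// anywhere in the project.
__host__ __device__ inline Matrix2D corotatedStress(Matrix2D Fe, double mu, double lambda) {
	Matrix2D I, U, Sig, V; // default-constructed Matrix2D is treated as identity, matching its use above
	svd(Fe, U, Sig, V);
	double Je = Sig.det();
	return (Fe - U * V.T()) * Fe.T() * 2 * mu + I * lambda * Je * (Je - 1);
}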
__host__ void Grid::updateVelocity() {
	// Explicit (symplectic Euler) update on every active node: vel_new = vel + dt * (gravity + force / mass)
thrust::for_each(
thrust::device,
nodes.begin(),
nodes.end(),
[=] __device__(Node& n) {
double timestep = 0.0001;
Vector2D gravity(0, -9.8);
if (n.active) {
n.vel_new = n.vel + timestep * (gravity + n.force / n.mass);
//printf("updated!\n");
}
}
);
collisionGrid();
}
__host__ void Grid::updateDeformation() {
Node* grid_ptr = thrust::raw_pointer_cast(&nodes[0]);
auto func = [=] __device__(Particle & p) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
int p_x = (int)p.grid_p.x;
int p_y = (int)p.grid_p.y;
p.velocity_gradient = Matrix2D(0, 0, 0, 0);
for (int it = 0, y = p_y - 1; y <= p_y + 2; ++y) {
for (int x = p_x - 1; x <= p_x + 2; ++x, ++it) {
if (y < 0 || y >= _size.y || x < 0 || x >= _size.x)
continue;
double temp = p.weights[it];
Vector2D delta_w = p.weight_gradient[it];
int node_id = int(y * _size.x + x);
if (temp > BSPLINE_EPSILON) {
p.velocity_gradient += outer_product(grid_ptr[node_id].vel_new, delta_w);
}
}
}
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
auto func2 = [=] __device__(Particle & p) {
Matrix2D I = Matrix2D();
p.elastic_deformation = (I + p.velocity_gradient * TIMESTEP) * p.elastic_deformation;
p.deformation_gradient = p.elastic_deformation * p.plastic_deformation;
Matrix2D U, Sig, V;
svd(p.elastic_deformation, U, Sig, V);
for (int idx = 0; idx < 2; ++idx) {
if (Sig(idx, idx) < CRIT_COMPRESS) {
Sig(idx, idx) = CRIT_COMPRESS;
}
else if (Sig(idx, idx) > CRIT_STRETCH) {
Sig(idx, idx) = CRIT_STRETCH;
}
}
Matrix2D Sig_inv(1.0 / Sig(0, 0), 0, 0, 1.0 / Sig(1, 1));
p.elastic_deformation = U * Sig * V.T();
p.plastic_deformation = V * Sig_inv * U.T() * p.deformation_gradient;
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func2);
}
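// The clamp applied to the singular values in updateDeformation is the snow plasticity rule:
// elastic stretch/compression is limited to [CRIT_COMPRESS, CRIT_STRETCH], and anything outside
// that band is pushed into the plastic part of the deformation. The helper below restates the
// clamp as a standalone sketch (illustrative only, not called by the project).
__host__ __device__ inline double clampSingularValue(double s) {
	if (s < CRIT_COMPRESS) return CRIT_COMPRESS;
	if (s > CRIT_STRETCH)  return CRIT_STRETCH;
	return s;
}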
// Map back to particles
__host__ void Grid::updateParticlesVelocity() {
Node* grid_ptr = thrust::raw_pointer_cast(&nodes[0]);
auto func = [=] __device__(Particle & p) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
int p_x = (int)p.grid_p.x;
int p_y = (int)p.grid_p.y;
p.density = 0;
Vector2D v_pic, v_flip = p.vel;
for (int it = 0, y = p_y - 1; y <= p_y + 2; ++y) {
for (int x = p_x - 1; x <= p_x + 2; ++x, ++it) {
if (y < 0 || y >= _size.y || x < 0 || x >= _size.x)
continue;
double w = p.weights[it];
int node_id = int(y * _size.x + x);
if (w > BSPLINE_EPSILON) {
//Node& node = nodes[node_id];
v_pic += grid_ptr[node_id].vel_new * w;
v_flip += (grid_ptr[node_id].vel_new - grid_ptr[node_id].vel) * w;
p.density += w * grid_ptr[node_id].mass;
}
}
}
double flip_percent = .95;
p.vel = v_flip * flip_percent + v_pic * (1 - flip_percent);
p.density /= _node_area;
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
collisionParticles();
}
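// updateParticlesVelocity blends a FLIP estimate (old particle velocity plus the grid's velocity
// change) with a PIC estimate (direct interpolation of the new grid velocity) using
// flip_percent = 0.95: mostly FLIP to preserve detail, a small PIC share to damp noise.
// The helper below is an illustrative restatement of that blend, not project API.
__host__ __device__ inline Vector2D blendPicFlip(Vector2D v_pic, Vector2D v_flip, double alpha) {
	return v_flip * alpha + v_pic * (1.0 - alpha);
}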
__host__ void Grid::updateParticlesPosition() {
auto func = [=] __device__(Particle & p) {
double timestep = 0.0001;
p.pos += timestep * p.vel;
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
}
__host__ void Grid::collisionGrid() {
auto func = [=] __device__(Node & n) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
double timestep = 0.0001;
if (n.active) {
Vector2D delta_scale = Vector2D(timestep, timestep);
delta_scale /= _node_size;
Vector2D new_pos = n.vel_new * delta_scale + n.pos;
if (new_pos.x < BSPLINE_RADIUS || new_pos.x > _size.x - BSPLINE_RADIUS - 1) {
n.vel_new.x = 0;
n.vel_new.y *= STICKY;
}
if (new_pos.y < BSPLINE_RADIUS || new_pos.y > _size.y - BSPLINE_RADIUS - 1) {
n.vel_new.x *= STICKY;
n.vel_new.y = 0;
}
}
};
thrust::for_each(thrust::device, nodes.begin(), nodes.end(), func);
}
__host__ void Grid::collisionParticles() {
auto func = [=] __device__(Particle & p) {
Vector2D _origin(0, 0), _node_size(1. / 128., 1. /128.), _size(256 + 1, 128 + 1);
double _node_area = _node_size.x * _node_size.y;
double timestep = 0.0001;
Vector2D delta_scale = Vector2D(timestep, timestep);
delta_scale /= _node_size;
Vector2D new_pos = p.grid_p + p.vel * delta_scale;
if (new_pos.x < BSPLINE_RADIUS - 1 || new_pos.x > _size.x - BSPLINE_RADIUS) {
p.vel.x *= -STICKY;
}
if (new_pos.y < BSPLINE_RADIUS - 1 || new_pos.y > _size.y - BSPLINE_RADIUS) {
p.vel.y *= -STICKY;
}
};
thrust::for_each(thrust::device, particles.begin(), particles.end(), func);
}
|