hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M) |
---|---|---|---|
73be3e81b52a6505f68237bf10496ffb034a8bc7.hip
|
// !!! This is a file automatically generated by hipify!!!
/////////////////////////////////////////
// Calculating Matrix A*B+C (CUDA Version)
// Created by Wang Zong-Sheng
// 2018/10/18
#include <iostream>
using namespace std;
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define A_ROW 3
#define A_COL 2
#define B_ROW 2
#define B_COL 3
#define C_ROW 2
#define C_COL 2
#define MP_NUM 15
#define CORES_PER_MP 192
template <typename T>
__global__ void cuda_matrix_mul(const T *A, const T *B, T *R)
{
int bId = blockIdx.y * gridDim.x + blockIdx.x;
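// One block per output element: each of the A_ROW threads forms one partial
// product of the dot product and accumulates it into R[bId] with atomicAdd,
// so R must be zeroed before the launch.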
T sum = A[blockIdx.y* A_ROW + threadIdx.x] * B[threadIdx.x * B_ROW + blockIdx.x];
__syncthreads();
//printf("Thread %d In block %d : R = %d, sum = %d\n", threadIdx.x, bId, temp, sum);
atomicAdd(&R[bId], sum);
}
//template <typename T>
//__global__ void cuda_matrix_mul(const T *A, const T *B, T *R)
//{
// int bId = blockIdx.y * gridDim.x + blockIdx.x;
// int sum = 0;
// for(int i=0; i<A_ROW; i++)
// sum += A[blockIdx.y* A_ROW + i] * B[i * B_ROW + blockIdx.x];
// R[bId] = sum;
//
//}
template <typename T>
__global__ void cuda_matrix_add(const T *A, const T *B, T *R)
{
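// One thread per matrix element: flatten the 2D thread index into a linear offset and add.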
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * blockDim.x + ix;
R[idx] = A[idx] + B[idx];
}
// using CUDA to implement AxB+C
template <typename T>
hipError_t matrix_mul_add_cuda(const T *A, unsigned int a_row, unsigned int a_col,
const T *B, unsigned int b_row, unsigned int b_col,
const T *C, unsigned int c_row, unsigned int c_col,
T *R, T *AB)
{
T *dev_a = 0;
T *dev_b = 0;
T *dev_c = 0;
T *dev_ab = 0;
T *dev_r = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
printf("hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for matrices
cudaStatus = hipMalloc((void**)&dev_a, a_row * a_col * sizeof(T));
if (cudaStatus != hipSuccess) {
printf("hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, b_row * b_col * sizeof(T));
if (cudaStatus != hipSuccess) {
printf("hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_c, c_row * c_col * sizeof(T));
if (cudaStatus != hipSuccess) {
printf("hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_ab, b_row * a_col * sizeof(T));
if (cudaStatus != hipSuccess) {
printf("hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_r, c_row * c_col * sizeof(T));
if (cudaStatus != hipSuccess) {
printf("hipMalloc failed!");
goto Error;
}
// Copy input matrices from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, A, a_row * a_col * sizeof(T), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
printf("hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, B, b_row * b_col * sizeof(T), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
printf("hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_c, C, c_row * c_col * sizeof(T), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
printf("hipMemcpy failed!");
goto Error;
}
// Launch kernels on the GPU
// In our case, the K40c GPU has 15 MPs and 192 cores per MP
// Zero dev_ab first: cuda_matrix_mul accumulates into it with atomicAdd
hipMemset(dev_ab, 0, b_row * a_col * sizeof(T));
dim3 grids(B_ROW, A_COL);
//cuda_matrix_mul <T> <<<grids, 1>>> (dev_a, dev_b, dev_ab);
hipLaunchKernelGGL(( cuda_matrix_mul <T>) , dim3(grids), dim3(A_ROW) , 0, 0, dev_a, dev_b, dev_ab);
hipDeviceSynchronize();
dim3 threads(C_ROW, C_COL);
hipLaunchKernelGGL(( cuda_matrix_add <T>) , dim3(1), dim3(threads), 0, 0, dev_ab, dev_c, dev_r);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
printf("addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
printf("hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output matrices from GPU buffers to host memory.
cudaStatus = hipMemcpy(AB, dev_ab, b_row * a_col * sizeof(T), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
printf("hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(R, dev_r, c_row * c_col * sizeof(T), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
printf("hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipFree(dev_ab);
hipFree(dev_r);
return cudaStatus;
}
template <typename T>
void print_matrix(T *M, unsigned int row, unsigned int col) {
for (unsigned int c = 0; c < col; c++) {
for (unsigned int r = 0; r < row; r++) {
cout << M[c*row + r] << ", ";
}
cout << endl;
}
}
int main()
{
const int A[A_ROW*A_COL] = { 1, 0, -3,
-2, 4, 1 };
const int B[B_ROW*B_COL] = { 2, -1,
3, 0,
-5, 2 };
const int C[C_ROW*C_COL] = { 3, -1,
-2, 2 };
int AB[A_COL*B_ROW];
int R[C_ROW*C_COL];
hipError_t cudaStatus;
cudaStatus = matrix_mul_add_cuda<int>(A, A_ROW, A_COL, B, B_ROW, B_COL, C, C_ROW, C_COL, R, AB);
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
//cout << "hipDeviceReset failed!" << endl;
return 1;
}
// for printing results
cout << "A = " << endl;
print_matrix<const int>(A, A_ROW, A_COL);
cout << endl << "B = " << endl;
print_matrix<const int>(B, B_ROW, B_COL);
cout << endl << "C = " << endl;
print_matrix<const int>(C, C_ROW, C_COL);
cout << endl << "Result:" << endl;
cout << "A x B = " << endl;
print_matrix(AB, B_ROW, A_COL);
cout << endl << "A x B + C = " << endl;
print_matrix<int>(R, C_ROW, C_COL);
return 0;
}
|
73be3e81b52a6505f68237bf10496ffb034a8bc7.cu
|
/////////////////////////////////////////
// Calculating Matrix A*B+C (CUDA Version)
// Created by Wang Zong-Sheng
// 2018/10/18
#include <iostream>
using namespace std;
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define A_ROW 3
#define A_COL 2
#define B_ROW 2
#define B_COL 3
#define C_ROW 2
#define C_COL 2
#define MP_NUM 15
#define CORES_PER_MP 192
template <typename T>
__global__ void cuda_matrix_mul(const T *A, const T *B, T *R)
{
int bId = blockIdx.y * gridDim.x + blockIdx.x;
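// One block per output element: each of the A_ROW threads forms one partial
// product of the dot product and accumulates it into R[bId] with atomicAdd,
// so R must be zeroed before the launch.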
T sum = A[blockIdx.y* A_ROW + threadIdx.x] * B[threadIdx.x * B_ROW + blockIdx.x];
__syncthreads();
//printf("Thread %d In block %d : R = %d, sum = %d\n", threadIdx.x, bId, temp, sum);
atomicAdd(&R[bId], sum);
}
//template <typename T>
//__global__ void cuda_matrix_mul(const T *A, const T *B, T *R)
//{
// int bId = blockIdx.y * gridDim.x + blockIdx.x;
// int sum = 0;
// for(int i=0; i<A_ROW; i++)
// sum += A[blockIdx.y* A_ROW + i] * B[i * B_ROW + blockIdx.x];
// R[bId] = sum;
//
//}
template <typename T>
__global__ void cuda_matrix_add(const T *A, const T *B, T *R)
{
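// One thread per matrix element: flatten the 2D thread index into a linear offset and add.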
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * blockDim.x + ix;
R[idx] = A[idx] + B[idx];
}
// using CUDA to implement AxB+C
template <typename T>
cudaError_t matrix_mul_add_cuda(const T *A, unsigned int a_row, unsigned int a_col,
const T *B, unsigned int b_row, unsigned int b_col,
const T *C, unsigned int c_row, unsigned int c_col,
T *R, T *AB)
{
T *dev_a = 0;
T *dev_b = 0;
T *dev_c = 0;
T *dev_ab = 0;
T *dev_r = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
printf("cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for matrices
cudaStatus = cudaMalloc((void**)&dev_a, a_row * a_col * sizeof(T));
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, b_row * b_col * sizeof(T));
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_c, c_row * c_col * sizeof(T));
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_ab, b_row * a_col * sizeof(T));
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_r, c_row * c_col * sizeof(T));
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc failed!");
goto Error;
}
// Copy input matrices from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, A, a_row * a_col * sizeof(T), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
printf("cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, B, b_row * b_col * sizeof(T), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
printf("cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_c, C, c_row * c_col * sizeof(T), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
printf("cudaMemcpy failed!");
goto Error;
}
// Launch kernels on the GPU
// In our case, the K40c GPU has 15 MPs and 192 cores per MP
// Zero dev_ab first: cuda_matrix_mul accumulates into it with atomicAdd
cudaMemset(dev_ab, 0, b_row * a_col * sizeof(T));
dim3 grids(B_ROW, A_COL);
//cuda_matrix_mul <T> <<<grids, 1>>> (dev_a, dev_b, dev_ab);
cuda_matrix_mul <T> <<<grids, A_ROW >>> (dev_a, dev_b, dev_ab);
cudaDeviceSynchronize();
dim3 threads(C_ROW, C_COL);
cuda_matrix_add <T> <<<1, threads>>> (dev_ab, dev_c, dev_r);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
printf("addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
printf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output matrices from GPU buffers to host memory.
cudaStatus = cudaMemcpy(AB, dev_ab, b_row * a_col * sizeof(T), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
printf("cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(R, dev_r, c_row * c_col * sizeof(T), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
printf("cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_ab);
cudaFree(dev_r);
return cudaStatus;
}
template <typename T>
void print_matrix(T *M, unsigned int row, unsigned int col) {
for (unsigned int c = 0; c < col; c++) {
for (unsigned int r = 0; r < row; r++) {
cout << M[c*row + r] << ", ";
}
cout << endl;
}
}
int main()
{
const int A[A_ROW*A_COL] = { 1, 0, -3,
-2, 4, 1 };
const int B[B_ROW*B_COL] = { 2, -1,
3, 0,
-5, 2 };
const int C[C_ROW*C_COL] = { 3, -1,
-2, 2 };
int AB[A_COL*B_ROW];
int R[C_ROW*C_COL];
cudaError_t cudaStatus;
cudaStatus = matrix_mul_add_cuda<int>(A, A_ROW, A_COL, B, B_ROW, B_COL, C, C_ROW, C_COL, R, AB);
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
//cout << "cudaDeviceReset failed!" << endl;
return 1;
}
// for printing results
cout << "A = " << endl;
print_matrix<const int>(A, A_ROW, A_COL);
cout << endl << "B = " << endl;
print_matrix<const int>(B, B_ROW, B_COL);
cout << endl << "C = " << endl;
print_matrix<const int>(C, C_ROW, C_COL);
cout << endl << "Result:" << endl;
cout << "A x B = " << endl;
print_matrix(AB, B_ROW, A_COL);
cout << endl << "A x B + C = " << endl;
print_matrix<int>(R, C_ROW, C_COL);
return 0;
}
|
b7e8b950486b32a6e802d541a2e110aa24eaf03a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
mallocMC: Memory Allocator for Many Core Architectures.
https://www.hzdr.de/crp
Copyright 2014 Institute of Radiation Physics,
Helmholtz-Zentrum Dresden - Rossendorf
Author(s): Carlchristian Eckert - c.eckert ( at ) hzdr.de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <iostream>
#include <cassert>
#include <vector>
#include <numeric>
#include <hip/hip_runtime.h>
#include <boost/mpl/int.hpp>
#include <boost/mpl/bool.hpp>
///////////////////////////////////////////////////////////////////////////////
// includes for mallocMC
///////////////////////////////////////////////////////////////////////////////
// basic files for mallocMC
#include "src/include/mallocMC/mallocMC_hostclass.hpp"
// Load all available policies for mallocMC
#include "src/include/mallocMC/CreationPolicies.hpp"
#include "src/include/mallocMC/DistributionPolicies.hpp"
#include "src/include/mallocMC/OOMPolicies.hpp"
#include "src/include/mallocMC/ReservePoolPolicies.hpp"
#include "src/include/mallocMC/AlignmentPolicies.hpp"
///////////////////////////////////////////////////////////////////////////////
// Configuration for mallocMC
///////////////////////////////////////////////////////////////////////////////
// configure the CreationPolicy "Scatter"
struct ScatterConfig{
typedef boost::mpl::int_<4096> pagesize;
typedef boost::mpl::int_<8> accessblocks;
typedef boost::mpl::int_<16> regionsize;
typedef boost::mpl::int_<2> wastefactor;
typedef boost::mpl::bool_<false> resetfreedpages;
};
struct ScatterHashParams{
typedef boost::mpl::int_<38183> hashingK;
typedef boost::mpl::int_<17497> hashingDistMP;
typedef boost::mpl::int_<1> hashingDistWP;
typedef boost::mpl::int_<1> hashingDistWPRel;
};
// configure the DistributionPolicy "XMallocSIMD"
struct DistributionConfig{
typedef ScatterConfig::pagesize pagesize;
};
// configure the AlignmentPolicy "Shrink"
struct AlignmentConfig{
typedef boost::mpl::int_<16> dataAlignment;
};
// Define a new allocator and call it ScatterAllocator
// which resembles the behaviour of ScatterAlloc
typedef mallocMC::Allocator<
mallocMC::CreationPolicies::Scatter<ScatterConfig,ScatterHashParams>,
mallocMC::DistributionPolicies::XMallocSIMD<DistributionConfig>,
mallocMC::OOMPolicies::ReturnNull,
mallocMC::ReservePoolPolicies::SimpleCudaMalloc,
mallocMC::AlignmentPolicies::Shrink<AlignmentConfig>
> ScatterAllocator;
///////////////////////////////////////////////////////////////////////////////
// End of mallocMC configuration
///////////////////////////////////////////////////////////////////////////////
void run();
int main()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
if( deviceProp.major < int(2) ) {
std::cerr << "Error: Compute Capability >= 2.0 required. (is ";
std::cerr << deviceProp.major << "."<< deviceProp.minor << ")" << std::endl;
return 1;
}
hipSetDevice(0);
run();
hipDeviceReset();
return 0;
}
__device__ int** arA;
__device__ int** arB;
__device__ int** arC;
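// createArrayPointers (run by a single thread) reserves the three x*y pointer
// tables from the mallocMC heap; fillArrays then lets every thread allocate
// and fill its own row of A and B.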
__global__ void createArrayPointers(int x, int y, ScatterAllocator::AllocatorHandle mMC){
arA = (int**) mMC.malloc(sizeof(int*) * x*y);
arB = (int**) mMC.malloc(sizeof(int*) * x*y);
arC = (int**) mMC.malloc(sizeof(int*) * x*y);
}
__global__ void fillArrays(int length, int* d, ScatterAllocator::AllocatorHandle mMC){
int id = threadIdx.x + blockIdx.x*blockDim.x;
arA[id] = (int*) mMC.malloc(sizeof(int)*length);
arB[id] = (int*) mMC.malloc(sizeof(int)*length);
arC[id] = (int*) mMC.malloc(sizeof(int)*length);
for(int i=0 ; i<length; ++i){
arA[id][i] = id*length+i;
arB[id][i] = id*length+i;
}
}
__global__ void addArrays(int length, int* d){
int id = threadIdx.x + blockIdx.x*blockDim.x;
d[id] = 0;
for(int i=0 ; i<length; ++i){
arC[id][i] = arA[id][i] + arB[id][i];
d[id] += arC[id][i];
}
}
__global__ void freeArrays(ScatterAllocator::AllocatorHandle mMC){
int id = threadIdx.x + blockIdx.x*blockDim.x;
mMC.free(arA[id]);
mMC.free(arB[id]);
mMC.free(arC[id]);
}
__global__ void freeArrayPointers(ScatterAllocator::AllocatorHandle mMC){
mMC.free(arA);
mMC.free(arB);
mMC.free(arC);
}
void run()
{
size_t block = 32;
size_t grid = 32;
int length = 100;
assert((unsigned)length <= block*grid); //necessary for used algorithm
//init the heap
std::cerr << "initHeap...";
ScatterAllocator mMC(1U*1024U*1024U*1024U); //1GB for device-side malloc
std::cerr << "done" << std::endl;
// device-side pointers
int* d;
hipMalloc((void**) &d, sizeof(int)*block*grid);
// host-side pointers
std::vector<int> array_sums(block*grid,0);
// create arrays of arrays on the device
hipLaunchKernelGGL(( createArrayPointers), dim3(1),dim3(1), 0, 0, grid, block, mMC );
// fill 2 of them with ascending values
hipLaunchKernelGGL(( fillArrays), dim3(grid),dim3(block), 0, 0, length, d, mMC );
// add the 2 arrays (vector addition within each thread)
// and do a thread-wise reduce to d
hipLaunchKernelGGL(( addArrays), dim3(grid),dim3(block), 0, 0, length, d);
hipMemcpy(&array_sums[0], d, sizeof(int)*block*grid, hipMemcpyDeviceToHost);
int sum = std::accumulate(array_sums.begin(), array_sums.end(), 0);
std::cout << "The sum of the arrays on GPU is " << sum << std::endl;
int n = block*grid*length;
int gaussian = n*(n-1);
std::cout << "The gaussian sum as comparison: " << gaussian << std::endl;
// checking the free memory of the allocator
if(mallocMC::Traits<ScatterAllocator>::providesAvailableSlots){
std::cout << "there are ";
std::cout << mMC.getAvailableSlots(1024U*1024U);
std::cout << " Slots of size 1MB available" << std::endl;
}
hipLaunchKernelGGL(( freeArrays), dim3(grid), dim3(block), 0, 0, mMC );
hipLaunchKernelGGL(( freeArrayPointers), dim3(1), dim3(1), 0, 0, mMC );
hipFree(d);
}
|
b7e8b950486b32a6e802d541a2e110aa24eaf03a.cu
|
/*
mallocMC: Memory Allocator for Many Core Architectures.
https://www.hzdr.de/crp
Copyright 2014 Institute of Radiation Physics,
Helmholtz-Zentrum Dresden - Rossendorf
Author(s): Carlchristian Eckert - c.eckert ( at ) hzdr.de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <iostream>
#include <cassert>
#include <vector>
#include <numeric>
#include <cuda.h>
#include <boost/mpl/int.hpp>
#include <boost/mpl/bool.hpp>
///////////////////////////////////////////////////////////////////////////////
// includes for mallocMC
///////////////////////////////////////////////////////////////////////////////
// basic files for mallocMC
#include "src/include/mallocMC/mallocMC_hostclass.hpp"
// Load all available policies for mallocMC
#include "src/include/mallocMC/CreationPolicies.hpp"
#include "src/include/mallocMC/DistributionPolicies.hpp"
#include "src/include/mallocMC/OOMPolicies.hpp"
#include "src/include/mallocMC/ReservePoolPolicies.hpp"
#include "src/include/mallocMC/AlignmentPolicies.hpp"
///////////////////////////////////////////////////////////////////////////////
// Configuration for mallocMC
///////////////////////////////////////////////////////////////////////////////
// configure the CreationPolicy "Scatter"
struct ScatterConfig{
typedef boost::mpl::int_<4096> pagesize;
typedef boost::mpl::int_<8> accessblocks;
typedef boost::mpl::int_<16> regionsize;
typedef boost::mpl::int_<2> wastefactor;
typedef boost::mpl::bool_<false> resetfreedpages;
};
struct ScatterHashParams{
typedef boost::mpl::int_<38183> hashingK;
typedef boost::mpl::int_<17497> hashingDistMP;
typedef boost::mpl::int_<1> hashingDistWP;
typedef boost::mpl::int_<1> hashingDistWPRel;
};
// configure the DistributionPolicy "XMallocSIMD"
struct DistributionConfig{
typedef ScatterConfig::pagesize pagesize;
};
// configure the AlignmentPolicy "Shrink"
struct AlignmentConfig{
typedef boost::mpl::int_<16> dataAlignment;
};
// Define a new allocator and call it ScatterAllocator
// which resembles the behaviour of ScatterAlloc
typedef mallocMC::Allocator<
mallocMC::CreationPolicies::Scatter<ScatterConfig,ScatterHashParams>,
mallocMC::DistributionPolicies::XMallocSIMD<DistributionConfig>,
mallocMC::OOMPolicies::ReturnNull,
mallocMC::ReservePoolPolicies::SimpleCudaMalloc,
mallocMC::AlignmentPolicies::Shrink<AlignmentConfig>
> ScatterAllocator;
///////////////////////////////////////////////////////////////////////////////
// End of mallocMC configuration
///////////////////////////////////////////////////////////////////////////////
void run();
int main()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
if( deviceProp.major < int(2) ) {
std::cerr << "Error: Compute Capability >= 2.0 required. (is ";
std::cerr << deviceProp.major << "."<< deviceProp.minor << ")" << std::endl;
return 1;
}
cudaSetDevice(0);
run();
cudaDeviceReset();
return 0;
}
__device__ int** arA;
__device__ int** arB;
__device__ int** arC;
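// createArrayPointers (run by a single thread) reserves the three x*y pointer
// tables from the mallocMC heap; fillArrays then lets every thread allocate
// and fill its own row of A and B.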
__global__ void createArrayPointers(int x, int y, ScatterAllocator::AllocatorHandle mMC){
arA = (int**) mMC.malloc(sizeof(int*) * x*y);
arB = (int**) mMC.malloc(sizeof(int*) * x*y);
arC = (int**) mMC.malloc(sizeof(int*) * x*y);
}
__global__ void fillArrays(int length, int* d, ScatterAllocator::AllocatorHandle mMC){
int id = threadIdx.x + blockIdx.x*blockDim.x;
arA[id] = (int*) mMC.malloc(sizeof(int)*length);
arB[id] = (int*) mMC.malloc(sizeof(int)*length);
arC[id] = (int*) mMC.malloc(sizeof(int)*length);
for(int i=0 ; i<length; ++i){
arA[id][i] = id*length+i;
arB[id][i] = id*length+i;
}
}
__global__ void addArrays(int length, int* d){
int id = threadIdx.x + blockIdx.x*blockDim.x;
d[id] = 0;
for(int i=0 ; i<length; ++i){
arC[id][i] = arA[id][i] + arB[id][i];
d[id] += arC[id][i];
}
}
__global__ void freeArrays(ScatterAllocator::AllocatorHandle mMC){
int id = threadIdx.x + blockIdx.x*blockDim.x;
mMC.free(arA[id]);
mMC.free(arB[id]);
mMC.free(arC[id]);
}
__global__ void freeArrayPointers(ScatterAllocator::AllocatorHandle mMC){
mMC.free(arA);
mMC.free(arB);
mMC.free(arC);
}
void run()
{
size_t block = 32;
size_t grid = 32;
int length = 100;
assert((unsigned)length <= block*grid); //necessary for used algorithm
//init the heap
std::cerr << "initHeap...";
ScatterAllocator mMC(1U*1024U*1024U*1024U); //1GB for device-side malloc
std::cerr << "done" << std::endl;
// device-side pointers
int* d;
cudaMalloc((void**) &d, sizeof(int)*block*grid);
// host-side pointers
std::vector<int> array_sums(block*grid,0);
// create arrays of arrays on the device
createArrayPointers<<<1,1>>>(grid, block, mMC );
// fill 2 of them with ascending values
fillArrays<<<grid,block>>>(length, d, mMC );
// add the 2 arrays (vector addition within each thread)
// and do a thread-wise reduce to d
addArrays<<<grid,block>>>(length, d);
cudaMemcpy(&array_sums[0], d, sizeof(int)*block*grid, cudaMemcpyDeviceToHost);
int sum = std::accumulate(array_sums.begin(), array_sums.end(), 0);
std::cout << "The sum of the arrays on GPU is " << sum << std::endl;
int n = block*grid*length;
int gaussian = n*(n-1);
std::cout << "The gaussian sum as comparison: " << gaussian << std::endl;
// checking the free memory of the allocator
if(mallocMC::Traits<ScatterAllocator>::providesAvailableSlots){
std::cout << "there are ";
std::cout << mMC.getAvailableSlots(1024U*1024U);
std::cout << " Slots of size 1MB available" << std::endl;
}
freeArrays<<<grid, block>>>( mMC );
freeArrayPointers<<<1, 1>>>( mMC );
cudaFree(d);
}
|
ba6d5d2aa0e6ebf1e7457e5eb970928adb158877.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
#define CASENAME "test"
#define BLOCKSIZEX 64
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define XDIM 128
#define YDIM 32
#define ZDIM 32
#define TMAX 2
#define STARTF 0
#define OBSTR 1.f
#define OBSTX 64.25f
#define OBSTY 16.25f
#define OBSTZ 16.25f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 31.75f //minimum x coord of LR
#define XLRDIM 128 //number of nodes in x
#define LRY0 7.75f
#define YLRDIM 32
#define LRZ0 7.75f
#define ZLRDIM 32
#define RE 20.f//100.f;
#define UMAX 0.08f
#define METHOD "HYB" //SINGLE,HYB,TEXT,SHARED
#define REFINEMENT "YES" //YES,NO
#define MODEL "MRT" //BGK,MRT,STREAM
//#define CHARLENGTH = XDIM-2.f;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
/*
Image List:
0 fluid
1 BB
2
3 DirichletWest(simple)
10 BB(force)
13 DirichletWest_Reg
14 NeumannEast_Reg
15 DirichletNorth_Reg
16 DirichletSouth_Reg
21 ysymmetry_top
22 ysymmetry_bot
23 zsymmetry_top
24 zsymmetry_bot
*/
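// ImageFcn tags each lattice node with one of these codes; boundaries()
// dispatches on the code to apply the matching boundary-condition routine.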
inline __device__ int ImageFcn(float x, float y, float z){
//if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY)*(y-OBSTY))<OBSTR*OBSTR)
if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY)*(y-OBSTY)+(z-OBSTZ)*(z-OBSTZ))<OBSTR*OBSTR)
{
return 10;
}
else
return 10;
//if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f)
// if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f)
// return 1;
// else if(x < 17.5f)
// return 13;
// else if(x > 78.5f)
// return 14;
// else
}
inline __device__ int ImageFcn(int x, int y, int z){
//Cylinder
// if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY)*(y-OBSTY))<OBSTR*OBSTR)
//Sphere
if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY)*(y-OBSTY)+(z-OBSTZ)*(z-OBSTZ))<OBSTR*OBSTR)
{
// if(z == 0 || z == ZDIM-1)
// return 1;
// else
return 10;
}
else if(x == 0)
return 1;//3;
else if(x == XDIM-1)
return 1;//4;
else if(y == 0)
return 1;//22;//22;
else if(y == YDIM-1)
return 1;//21;
else if(z == 0)
return 1;//24;
else if(z == ZDIM-1)
return 1;//23;
else
return 0;
//Lid Driven Cavity
// if(x == 0)
// return 3;
// else if(x == XDIM-1 || y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// return 1;
// else
// return 0;
}
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// return 1.f;
}
__device__ void DirichletWest(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
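// Velocity inlet on the west face: mirror distributions along the y/z edges,
// then reconstruct the x-incoming populations from the prescribed velocity.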
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = 0.0f;//UMAX;//*PoisProf(zcoord)*1.5;
v = UMAX;//0.0;
w = 0.0f;
//rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);;
f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
__device__ void DirichletWest_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(y)*1.5;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f1 = f3+0.33333333f*u;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f5 = f7+0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f10= f17+0.166666667f*(u+w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f15= f12+0.166666667f*(u-w);
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void NeumannEast_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f3 = f1 -0.333333333f*u;
f7 = f5 -0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f17= f10-0.166666667f*(u+w);
f12= f15-0.166666667f*(u-w);
// f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+
// (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2));
// f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+
// (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v));
// f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+
// (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v));
// f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+
// (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w));
// f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+
// (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w));
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f4 = f2-0.33333333f*v;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f7 = f5-0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f13= f16-0.166666667f*(v-w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f18= f11-0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f2 = f4 +0.33333333f*v;
f5 = f7 +0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f16= f13+0.166666667f*(v-w);
f11= f18+0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void ysymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
__device__ void ysymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
__device__ void zsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
__device__ void zsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
__device__ void boundaries(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 13)//DirichletWest
{
DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 14)//DirichletWest
{
NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
}
texture<float,2,hipReadModeElementType> texRef_f0A;
texture<float,2,hipReadModeElementType> texRef_f1A;
texture<float,2,hipReadModeElementType> texRef_f2A;
texture<float,2,hipReadModeElementType> texRef_f3A;
texture<float,2,hipReadModeElementType> texRef_f4A;
texture<float,2,hipReadModeElementType> texRef_f5A;
texture<float,2,hipReadModeElementType> texRef_f6A;
texture<float,2,hipReadModeElementType> texRef_f7A;
texture<float,2,hipReadModeElementType> texRef_f8A;
texture<float,2,hipReadModeElementType> texRef_f9A;
texture<float,2,hipReadModeElementType> texRef_f10A;
texture<float,2,hipReadModeElementType> texRef_f11A;
texture<float,2,hipReadModeElementType> texRef_f12A;
texture<float,2,hipReadModeElementType> texRef_f13A;
texture<float,2,hipReadModeElementType> texRef_f14A;
texture<float,2,hipReadModeElementType> texRef_f15A;
texture<float,2,hipReadModeElementType> texRef_f16A;
texture<float,2,hipReadModeElementType> texRef_f17A;
texture<float,2,hipReadModeElementType> texRef_f18A;
texture<float,2,hipReadModeElementType> texRef_f0B;
texture<float,2,hipReadModeElementType> texRef_f1B;
texture<float,2,hipReadModeElementType> texRef_f2B;
texture<float,2,hipReadModeElementType> texRef_f3B;
texture<float,2,hipReadModeElementType> texRef_f4B;
texture<float,2,hipReadModeElementType> texRef_f5B;
texture<float,2,hipReadModeElementType> texRef_f6B;
texture<float,2,hipReadModeElementType> texRef_f7B;
texture<float,2,hipReadModeElementType> texRef_f8B;
texture<float,2,hipReadModeElementType> texRef_f9B;
texture<float,2,hipReadModeElementType> texRef_f10B;
texture<float,2,hipReadModeElementType> texRef_f11B;
texture<float,2,hipReadModeElementType> texRef_f12B;
texture<float,2,hipReadModeElementType> texRef_f13B;
texture<float,2,hipReadModeElementType> texRef_f14B;
texture<float,2,hipReadModeElementType> texRef_f15B;
texture<float,2,hipReadModeElementType> texRef_f16B;
texture<float,2,hipReadModeElementType> texRef_f17B;
texture<float,2,hipReadModeElementType> texRef_f18B;
texture<float,2,hipReadModeElementType> texRef_f0C;
texture<float,2,hipReadModeElementType> texRef_f1C;
texture<float,2,hipReadModeElementType> texRef_f2C;
texture<float,2,hipReadModeElementType> texRef_f3C;
texture<float,2,hipReadModeElementType> texRef_f4C;
texture<float,2,hipReadModeElementType> texRef_f5C;
texture<float,2,hipReadModeElementType> texRef_f6C;
texture<float,2,hipReadModeElementType> texRef_f7C;
texture<float,2,hipReadModeElementType> texRef_f8C;
texture<float,2,hipReadModeElementType> texRef_f9C;
texture<float,2,hipReadModeElementType> texRef_f10C;
texture<float,2,hipReadModeElementType> texRef_f11C;
texture<float,2,hipReadModeElementType> texRef_f12C;
texture<float,2,hipReadModeElementType> texRef_f13C;
texture<float,2,hipReadModeElementType> texRef_f14C;
texture<float,2,hipReadModeElementType> texRef_f15C;
texture<float,2,hipReadModeElementType> texRef_f16C;
texture<float,2,hipReadModeElementType> texRef_f17C;
texture<float,2,hipReadModeElementType> texRef_f18C;
texture<float,2,hipReadModeElementType> texRef_f0D;
texture<float,2,hipReadModeElementType> texRef_f1D;
texture<float,2,hipReadModeElementType> texRef_f2D;
texture<float,2,hipReadModeElementType> texRef_f3D;
texture<float,2,hipReadModeElementType> texRef_f4D;
texture<float,2,hipReadModeElementType> texRef_f5D;
texture<float,2,hipReadModeElementType> texRef_f6D;
texture<float,2,hipReadModeElementType> texRef_f7D;
texture<float,2,hipReadModeElementType> texRef_f8D;
texture<float,2,hipReadModeElementType> texRef_f9D;
texture<float,2,hipReadModeElementType> texRef_f10D;
texture<float,2,hipReadModeElementType> texRef_f11D;
texture<float,2,hipReadModeElementType> texRef_f12D;
texture<float,2,hipReadModeElementType> texRef_f13D;
texture<float,2,hipReadModeElementType> texRef_f14D;
texture<float,2,hipReadModeElementType> texRef_f15D;
texture<float,2,hipReadModeElementType> texRef_f16D;
texture<float,2,hipReadModeElementType> texRef_f17D;
texture<float,2,hipReadModeElementType> texRef_f18D;
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
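// Relax each of the 19 distributions toward its D3Q19 BGK equilibrium at rate omega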
f0 = f0 -omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
f1 = f1 -omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 = f2 -omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
f3 = f3 -omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
f4 = f4 -omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
f5 = f5 -omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
f6 = f6 -omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
f7 = f7 -omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
f8 = f8 -omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
f9 = f9 -omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
f10= f10-omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
f11= f11-omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
f12= f12-omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
f13= f13-omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
f14= f14-omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
f15= f15-omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
f16= f16-omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
f17= f17-omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
f18= f18-omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
inline __device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
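// Only the moment computation below is active; the relaxation updates of
// f0..f18 further down are commented out in this version.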
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
//f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
//f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
//f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
//f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
//f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
//f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
//f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
//f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
//f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
//f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
//f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
//f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
//f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
//f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
//f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
//f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
//f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
//f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
//f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
//f0 -= - 0.012531328f*(m1)+ 0.047619048f*(m2);
//f1 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
//f2 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f3 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
//f4 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f9 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f10 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f11 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//f12 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f13 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//f14 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f15 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f16 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//f17 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f18 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//
//f2 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12);
//f4 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12);
//f5 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega;
//f6 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega;
//f7 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega;
//f8 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega;
//f9 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12);
//f10 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ;
//f11 -= +( 0.25f*(m14) )*omega ;
//f12 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ;
//f13 -= +( -0.25f*(m14) )*omega ;
//f14 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12);
//f15 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ;
//f16 -= +( -0.25f*(m14) )*omega ;
//f17 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ;
//f18 -= +( 0.25f*(m14) )*omega ;
//
//f5 -= 0.125f*(m16)+ -0.125f*(m17);
//f6 -= -0.125f*(m16)+ -0.125f*(m17);
//f7 -= -0.125f*(m16)+ 0.125f*(m17);
//f8 -= 0.125f*(m16)+ 0.125f*(m17);
//f10 -= -0.125f*(m16) + 0.125f*(m18);
//f11 -= + 0.125f*(m17)+ -0.125f*(m18);
//f12 -= 0.125f*(m16) + 0.125f*(m18);
//f13 -= + -0.125f*(m17)+ -0.125f*(m18);
//f15 -= -0.125f*(m16) + -0.125f*(m18);
//f16 -= + 0.125f*(m17)+ 0.125f*(m18);
//f17 -= 0.125f*(m16) + -0.125f*(m18);
//f18 -= + -0.125f*(m17)+ 0.125f*(m18);
}
//{
// float u,v,w;
//// float rho = f1+f2+f4+f6+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
//// u = u*rho;
//// v = v*rho;
//// w = w*rho;
//
// float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//
//
// //COMPUTE M-MEQ
// m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
// m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
// m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
// m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
// m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18 -(2.f*u*u-(v*v+w*w));
// m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
// m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 -(v*v-w*w);
// m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
// m13 = f5+ - f6+ f7+ - f8 -u*v;
// m14 = f11 + - f13 + - f16 + f18 -v*w;
// m15 = f10 + - f12 + - f15 + f17 -u*w;
// m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
// m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
// m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
//
//
//f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
//f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
//f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
//f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
//f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
//f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
//f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
//f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
//f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
//f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
//f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
//f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
//f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
//f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
//f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
//f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
//f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
//f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
//f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
//}
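//Rescale a D3Q19 distribution set when passing data between grid levels (the "cf"
//suffix suggests coarse-to-fine). The routine recomputes rho, u, v, w and the
//equilibrium distributions feq0..feq18 from the incoming values, then blends each
//distribution toward equilibrium with the scale factor SF: f = SF*f + (1-SF)*feq,
//i.e. the non-equilibrium part is rescaled while the conserved moments are preserved.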
inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
// u = rho*u;
// v = rho*v;
// w = rho*w;
// float meq0 = rho;
// float meq1 = -11.f*rho+19.f*(u*u+v*v+w*w);
// float meq2 = -7.53968254f*(u*u+v*v+w*w);
// float meq3 = u;
// float meq4 = -0.66666667f*u;//qx_eq
// float meq5 = v;
// float meq6 = -0.66666667f*v;//qx_eq
// float meq7 = w;
// float meq8 = -0.66666667f*w;//qx_eq
// float meq9 = (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
// float meq11= (v*v-w*w);//pww_eq
// float meq13= u*v;//pxy_eq
// float meq14= v*w;//pyz_eq
// float meq15= u*w;//pxz_eq
float usqr = u*u+v*v+w*w;
//float feq0 = 0.1904761791f*rho+ -0.5971277356f*usqr ;
//float feq1 = 0.1031746045f*rho+ 0.0323759168f*usqr+ (0.166666667f*u) + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.0323759168f*usqr + (0.166666667f*v) +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.0323759168f*usqr+ -(0.166666667f*u) + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.0323759168f*usqr + -(0.166666667f*v) +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.0335726887f*usqr+ 0.1f*(u+v)+ 0.0166666667f*(-u-v) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.0335726887f*usqr+ -0.1f*(u-v)+ -0.0166666667f*(-u+v) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.0335726887f*usqr+ -0.1f*(u+v)+ -0.0166666667f*(-u-v) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.0335726887f*usqr+ 0.1f*(u-v)+ 0.0166666667f*(-u+v) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.0323759168f*usqr + (0.166666667f*w)+-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
//float feq10= 0.0158730149f*rho+ 0.0335726887f*usqr+ 0.1f*(u+w)+ 0.0166666667f*(-u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.0335726887f*usqr + 0.1f*(v+w)+ 0.0166666667f*(-v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.0335726887f*usqr+ -0.1f*(u-w)+ -0.0166666667f*(-u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.0335726887f*usqr + -0.1f*(v-w)+ -0.0166666667f*(-v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.0323759168f*usqr + -(0.166666667f*w)+-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
//float feq15= 0.0158730149f*rho+ 0.0335726887f*usqr+ 0.1f*(u-w)+ 0.0166666667f*(-u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.0335726887f*usqr + 0.1f*(v-w)+ 0.0166666667f*(-v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.0335726887f*usqr+ -0.1f*(u+w)+ -0.0166666667f*(-u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.0335726887f*usqr+ + -0.1f*(v+w)+ -0.0166666667f*(-v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq0 = 0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*(-7.53968254*usqr) ;
//float feq1 = 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr)+ 0.1f*(meq3-meq4) ;
//float feq2 = 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr) + 0.1f*(meq5-meq6) ;
//float feq3 = 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr)+ -0.1f*(meq3-meq4) ;
//float feq4 = 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr) + -0.1f*(meq5-meq6) ;
//float feq5 = 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ 0.1f*(meq3+meq5)+ 0.025f*(meq4+meq6) ;
//float feq6 = 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ -0.1f*(meq3-meq5)+ -0.025f*(meq4-meq6) ;
//float feq7 = 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ -0.1f*(meq3+meq5)+ -0.025f*(meq4+meq6) ;
//float feq8 = 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ 0.1f*(meq3-meq5)+ 0.025f*(meq4-meq6) ;
//float feq9 = 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr) + 0.1f*(meq7-meq8);
//float feq10= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ 0.1f*(meq3+meq7)+ 0.025f*(meq4+meq8) ;
//float feq11= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr) + 0.1f*(meq5+meq7)+ 0.025f*(meq6+meq8);
//float feq12= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ -0.1f*(meq3-meq7)+ -0.025f*(meq4-meq8) ;
//float feq13= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr) + -0.1f*(meq5-meq7)+ -0.025f*(meq6-meq8);
//float feq14= 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr) + -0.1f*(meq7-meq8);
//float feq15= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ 0.1f*(meq3-meq7)+ 0.025f*(meq4-meq8) ;
//float feq16= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr) + 0.1f*(meq5-meq7)+ 0.025f*(meq6-meq8);
//float feq17= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ -0.1f*(meq3+meq7)+ -0.025f*(meq4+meq8) ;
//float feq18= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ + -0.1f*(meq5+meq7)+ -0.025f*(meq6+meq8);
//
//feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
//feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
//feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
//feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
//feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
//float u2 = u*u;
//float v2 = v*v;
//float w2 = w*w;
//float usqr = u2+v2+w2;
//
//
//f0 =SF*f0 +(1.0f-SF)*(0.1904761791f*rho+ -0.5971277356f*usqr);
//f1 =SF*f1 +(1.0f-SF)*(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2);
//f2 =SF*f2 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+ (0.166666667f*v) +-0.08333333f*u2+ 0.083333333f*(v2-w2));
//f3 =SF*f3 +(1.0f-SF)*(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2);
//f4 =SF*f4 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+-(0.166666667f*v) +-0.08333333f*u2+ 0.083333333f*(v2-w2));
//f5 =SF*f5 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v);
//f6 =SF*f6 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v);
//f7 =SF*f7 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v);
//f8 =SF*f8 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v);
//f9 =SF*f9 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+ (0.166666667f*w) +-0.08333333f*(u2 +(v2-w2)));//+ -0.083333333f*(v2-w2));
//f10=SF*f10+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w);
//f12=SF*f12+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w);
//f15=SF*f15+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w);
//f17=SF*f17+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w);
//f11=SF*f11+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+ 0.0833333333f*(v+w)+-0.16666667f*u2+ 0.25f*v*w);
//f13=SF*f13+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+-0.0833333333f*(v-w)+-0.16666667f*u2 -0.25f*v*w);
//f16=SF*f16+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+ 0.0833333333f*(v-w)+-0.16666667f*u2+ -0.25f*v*w);
//f18=SF*f18+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+-0.0833333333f*(v+w)+-0.16666667f*u2+ 0.25f*v*w);
//f14=SF*f14+(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+-(0.166666667f*w) +-0.08333333f*(u2 +(v2-w2)));//+ -0.083333333f*(v2-w2));
//f0 =SF*f0 +(1.0f-SF)*(0.1904761791f*rho+ -0.5971277356f*usqr);
//f1 =SF*f1 +(1.0f-SF)*(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2);
//f2 =SF*f2 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+ (0.166666667f*v) +-0.08333333f*u2+ 0.083333333f*(v2-w2));
//f3 =SF*f3 +(1.0f-SF)*(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2);
//f4 =SF*f4 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+-(0.166666667f*v) +-0.08333333f*u2+ 0.083333333f*(v2-w2));
//f5 =SF*f5 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(u+v)+ 0.08333333f*u2+ 0.083333333f*(v2-w2)+ 0.25f*u*v);
//f6 =SF*f6 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+-0.0833333333f*(u-v)+ 0.08333333f*u2+ 0.083333333f*(v2-w2)+ -0.25f*u*v);
//f7 =SF*f7 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+-0.0833333333f*(u+v)+ 0.08333333f*u2+ 0.083333333f*(v2-w2)+ 0.25f*u*v);
//f8 =SF*f8 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(u-v)+ 0.08333333f*u2+ 0.083333333f*(v2-w2)+ -0.25f*u*v);
//f9 =SF*f9 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+ (0.166666667f*w) +-0.08333333f*u2+ -0.083333333f*(v2-w2));
//f10=SF*f10+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(u+w)+ 0.08333333f*u2+ -0.083333333f*(v2-w2)+ 0.25f*u*w);
//f11=SF*f11+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+ 0.0833333333f*(v+w)+-0.16666667f*u2+ 0.25f*v*w);
//f12=SF*f12+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+-0.0833333333f*(u-w)+ 0.08333333f*u2+ -0.083333333f*(v2-w2)+ -0.25f*u*w);
//f13=SF*f13+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+-0.0833333333f*(v-w)+-0.16666667f*u2 -0.25f*v*w);
//f14=SF*f14+(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+-(0.166666667f*w) +-0.08333333f*u2+ -0.083333333f*(v2-w2));
//f15=SF*f15+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+0.0833333333f*(u-w) + 0.08333333f*u2+ -0.083333333f*(v2-w2)+ -0.25f*u*w);
//f16=SF*f16+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+ 0.0833333333f*(v-w)+-0.16666667f*u2+ -0.25f*v*w);
//f17=SF*f17+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+-0.0833333333f*(u+w)+ 0.08333333f*u2+ -0.083333333f*(v2-w2)+ 0.25f*u*w);
//f18=SF*f18+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+-0.0833333333f*(v+w)+-0.16666667f*u2+ 0.25f*v*w);
}
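//Linear index helpers for the pitched distribution arrays: each of the 19 directions
//occupies a contiguous pitch*YDIM*ZDIM (coarse) or pitch*YLRDIM*ZLRDIM (refined) block,
//with the x-rows padded out to `pitch` elements.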
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch)
{
return (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*ZDIM;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch)
{
return (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*ZLRDIM;
}
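//Index clamping helpers: dmin(a,b) limits an index to at most b-1 (e.g. dmin(x+1,XDIM)),
//dmax(a) limits an index to at least 0.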
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
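//Diagnostic copy kernels: simple_copy reads each node's value from its +x neighbour
//(clamped at the domain edge) through global memory; simple_text does the analogous
//read through the texRef_f0A texture reference.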
__global__ void simple_copy(float* fA, float* fB,
size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int k = dmin(x+1,XDIM)+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
fB[j] = fA[k];//+0.01f;
}
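//Hedged host-side sketch (added for illustration; not part of the original solver):
//how a pitched allocation could be created and handed to simple_copy. cudaMallocPitch
//reports the pitch in bytes, while the kernels in this file take the pitch in elements,
//hence the division by sizeof(float). The launch shape assumes XDIM is a multiple of
//BLOCKSIZEX, matching the bounds-check-free indexing used by these kernels, and that
//XDIM, YDIM, ZDIM and BLOCKSIZEX are the constants defined earlier in this file.
static void example_launch_simple_copy(float** dA, float** dB)
{
	size_t pitchBytes = 0;
	//one XDIM-wide row of floats per (y,z) pair, YDIM*ZDIM rows in total
	cudaMallocPitch((void**)dA, &pitchBytes, XDIM*sizeof(float), YDIM*ZDIM);
	cudaMallocPitch((void**)dB, &pitchBytes, XDIM*sizeof(float), YDIM*ZDIM);
	size_t pitchElements = pitchBytes/sizeof(float);
	dim3 threads(BLOCKSIZEX, 1, 1);
	dim3 grid(XDIM/BLOCKSIZEX, YDIM, ZDIM);
	simple_copy<<<grid, threads>>>(*dA, *dB, pitchElements);
}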
__global__ void simple_text(float* fA, float* fB,
size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
fB[j] = tex2D(texRef_f0A,x+1,y);//+0.01f;
}
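//Gather distributions from the refined-grid ("C") textures back onto coarse nodes lying
//on the ring just inside the refined region. x and y are sampled directly from the 2D
//textures; the z direction is interpolated linearly between the zminus and zplus slices.
//The gathered distributions are rescaled with mrt_scale_cf(...,SF) before being written
//to the coarse array fout.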
__global__ void ExtractFromC_d(float* fout,
size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(x < LRX0+1 || x > LRX0+XLRDIM-2 || y < LRY0+1 || y > LRY0+YLRDIM-2 || z < LRZ0+1 || z > LRZ0+ZLRDIM-2)
// //if(x < LRX0+2 || x > LRX0+XLRDIM-3 || y < LRY0+2 || y > LRY0+YLRDIM-3 || z < LRZ0+2 || z > LRZ0+ZLRDIM-3)
// {
// //do nothing
// }
// else{
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+ZLRDIM*LRFACTOR-1) &&
   (x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1) || z == int(LRZ0+2) || z == int(LRZ0+ZLRDIM*LRFACTOR-1)) )
{
// if(x > 10 && y > 10 && z > 10 && x < 20 && y < 20 && z < 20)
// {
float xcoord = LRLEVEL*(x-LRX0)+0.5f;
float ycoord = LRLEVEL*(y-LRY0)+0.5f;
float zcoord = LRLEVEL*(z-LRZ0);
int zminus = int(zcoord);
int zplus = zminus+1;
f0 = (zplus-zcoord)*tex2D(texRef_f0C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0C ,xcoord,ycoord+YLRDIM*(zplus));
f2 = (zplus-zcoord)*tex2D(texRef_f2C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2C ,xcoord,ycoord+YLRDIM*(zplus));
f4 = (zplus-zcoord)*tex2D(texRef_f4C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4C ,xcoord,ycoord+YLRDIM*(zplus));
f9 = (zplus-zcoord)*tex2D(texRef_f9C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9C ,xcoord,ycoord+YLRDIM*(zplus));
f11= (zplus-zcoord)*tex2D(texRef_f11C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11C,xcoord,ycoord+YLRDIM*(zplus));
f13= (zplus-zcoord)*tex2D(texRef_f13C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13C,xcoord,ycoord+YLRDIM*(zplus));
f14= (zplus-zcoord)*tex2D(texRef_f14C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14C,xcoord,ycoord+YLRDIM*(zplus));
f16= (zplus-zcoord)*tex2D(texRef_f16C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16C,xcoord,ycoord+YLRDIM*(zplus));
f18= (zplus-zcoord)*tex2D(texRef_f18C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18C,xcoord,ycoord+YLRDIM*(zplus));
f1 = (zplus-zcoord)*tex2D(texRef_f1C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1C ,xcoord,ycoord+YLRDIM*(zplus));
f3 = (zplus-zcoord)*tex2D(texRef_f3C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3C ,xcoord,ycoord+YLRDIM*(zplus));
f5 = (zplus-zcoord)*tex2D(texRef_f5C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5C ,xcoord,ycoord+YLRDIM*(zplus));
f6 = (zplus-zcoord)*tex2D(texRef_f6C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6C ,xcoord,ycoord+YLRDIM*(zplus));
f7 = (zplus-zcoord)*tex2D(texRef_f7C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7C ,xcoord,ycoord+YLRDIM*(zplus));
f8 = (zplus-zcoord)*tex2D(texRef_f8C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8C ,xcoord,ycoord+YLRDIM*(zplus));
f15= (zplus-zcoord)*tex2D(texRef_f15C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15C,xcoord,ycoord+YLRDIM*(zplus));
f17= (zplus-zcoord)*tex2D(texRef_f17C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17C,xcoord,ycoord+YLRDIM*(zplus));
f10= (zplus-zcoord)*tex2D(texRef_f10C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10C,xcoord,ycoord+YLRDIM*(zplus));
f12= (zplus-zcoord)*tex2D(texRef_f12C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12C,xcoord,ycoord+YLRDIM*(zplus));
mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF);
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
// f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
// float usqr = fma(u,u,fma(v,v,w*w));
//
//// float m1 -= -11.f*rho+19.f*(u*u+v*v+w*w);
//// float m4 = -0.66666667f*u;//qx_eq
//// float m6 = -0.66666667f*v;//qx_eq
//// float m8 = -0.66666667f*w;//qx_eq
//// float m9 -= (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
//// float m11-= (v*v-w*w);//pww_eq
//// float m13-= u*v;//pxy_eq
//// float m14-= v*w;//pyz_eq
//// float m15-= u*w;//pxz_eq
//
// if(MODEL == "MRT")
// {
// float meq0 = rho;
// float meq1 = -11.f*rho+19.f*(u*u+v*v+w*w);
// float meq2 = -7.53968254f*(u*u+v*v+w*w);
// float meq3 = u;
// float meq4 = -0.66666667f*u;//qx_eq
// float meq5 = v;
// float meq6 = -0.66666667f*v;//qx_eq
// float meq7 = w;
// float meq8 = -0.66666667f*w;//qx_eq
// float meq9 = (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
// float meq11= (v*v-w*w);//pww_eq
// float meq13= u*v;//pxy_eq
// float meq14= v*w;//pyz_eq
// float meq15= u*w;//pxz_eq
//
//float feq0 = 0.052631579f*meq0+ -0.012531328f*meq1+ 0.047619048f*meq2 ;
//float feq1 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ 0.1f*meq3+ -0.1f*meq4 ;
//float feq2 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*meq5+ -0.1f*meq6 ;
//float feq3 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ -0.1f*meq3+ 0.1f*meq4 ;
//float feq4 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*meq5+ 0.1f*meq6 ;
//float feq5 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4+ 0.1f*meq5+ 0.025f*meq6 ;
//float feq6 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4+ 0.1f*meq5+ 0.025f*meq6 ;
//float feq7 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4+ -0.1f*meq5+ -0.025f*meq6 ;
//float feq8 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4+ -0.1f*meq5+ -0.025f*meq6 ;
//float feq9 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*meq7+ -0.1f*meq8;
//float feq10= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4 + 0.1f*meq7+ 0.025f*meq8;
//float feq11= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*meq5+ 0.025f*meq6+ 0.1f*meq7+ 0.025f*meq8;
//float feq12= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4 + 0.1f*meq7+ 0.025f*meq8;
//float feq13= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + -0.1f*meq5+ -0.025f*meq6+ 0.1f*meq7+ 0.025f*meq8;
//float feq14= 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*meq7+ 0.1f*meq8;
//float feq15= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4 + -0.1f*meq7+ -0.025f*meq8;
//float feq16= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*meq5+ 0.025f*meq6+ -0.1f*meq7+ -0.025f*meq8;
//float feq17= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4 + -0.1f*meq7+ -0.025f*meq8;
//float feq18= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ + -0.1f*meq5+ -0.025f*meq6+ -0.1f*meq7+ -0.025f*meq8;
//
//feq1 += 0.055555556f*meq9;
//feq2 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq3 += 0.055555556f*meq9;
//feq4 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq5 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq6 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq7 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq8 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq9 += -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq10+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq11+= -0.055555556f*meq9 + 0.25f*meq14 ;
//feq12+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq13+= -0.055555556f*meq9 -0.25f*meq14 ;
//feq14+= -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq15+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq16+= -0.055555556f*meq9 + -0.25f*meq14 ;
//feq17+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq18+= -0.055555556f*meq9 + 0.25f*meq14 ;
//
//
//f0 =SF*f0 +(1.0f-SF)*feq0 ;
//f1 =SF*f1 +(1.0f-SF)*feq1 ;
//f2 =SF*f2 +(1.0f-SF)*feq2 ;
//f3 =SF*f3 +(1.0f-SF)*feq3 ;
//f4 =SF*f4 +(1.0f-SF)*feq4 ;
//f5 =SF*f5 +(1.0f-SF)*feq5 ;
//f6 =SF*f6 +(1.0f-SF)*feq6 ;
//f7 =SF*f7 +(1.0f-SF)*feq7 ;
//f8 =SF*f8 +(1.0f-SF)*feq8 ;
//f9 =SF*f9 +(1.0f-SF)*feq9 ;
//f10=SF*f10+(1.0f-SF)*feq10;
//f11=SF*f11+(1.0f-SF)*feq11;
//f12=SF*f12+(1.0f-SF)*feq12;
//f13=SF*f13+(1.0f-SF)*feq13;
//f14=SF*f14+(1.0f-SF)*feq14;
//f15=SF*f15+(1.0f-SF)*feq15;
//f16=SF*f16+(1.0f-SF)*feq16;
//f17=SF*f17+(1.0f-SF)*feq17;
//f18=SF*f18+(1.0f-SF)*feq18;
//
//
//
//
//
//
//// float m2 = -7.53968254f*(u*u+v*v+w*w);
//// //scale
//// f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
//// f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.01666666667f*(-u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.01666666667f*( u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.01666666667f*( u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.01666666667f*(-u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.01666666667f*(-u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.01666666667f*( u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
//// f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
//// f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.01666666667f*(-u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.01666666667f*( u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// }
// else
// {
//// //scale
////f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
////f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(u+0.666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
////f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(v+0.666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
////f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(-0.666666667f*u-u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
////f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(-0.666666667f*v-v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
////f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.025f*( -0.666666667f*u+-0.666666667f*v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
////f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.025f*(+0.666666667f*u+-0.666666667f*v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
////f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.025f*(+0.666666667f*u+0.666666667f*v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
////f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.025f*( -0.666666667f*u+0.666666667f*v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
////f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(w+0.666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
////f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.025f*( -0.666666667f*u+-0.666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
////f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.025f*( -0.666666667f*v+-0.666666667f*w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
////f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.025f*(+0.666666667f*u+-0.666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
////f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.025f*( -0.666666667f*v+-0.666666667f*w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
////f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(-0.666666667f*w-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
////f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.025f*( -0.666666667f*u+0.666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
////f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.025f*( -0.666666667f*v+0.666666667f*w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
////f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.025f*(+0.666666667f*u+0.666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
////f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.025f*( -0.666666667f*v+0.666666667f*w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.3333333333f*(rho-1.5f*usqr));
// f1 =SF*f1 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =SF*f2 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =SF*f3 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =SF*f4 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =SF*f5 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =SF*f6 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =SF*f7 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =SF*f8 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =SF*f9 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=SF*f10+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=SF*f11+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=SF*f12+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=SF*f13+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=SF*f14+(1.0f-SF)*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=SF*f15+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=SF*f16+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=SF*f17+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=SF*f18+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
// }
fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
fout[f_mem(10,x,y,z,pitch)] = f10;
fout[f_mem(11,x,y,z,pitch)] = f11;
fout[f_mem(12,x,y,z,pitch)] = f12;
fout[f_mem(13,x,y,z,pitch)] = f13;
fout[f_mem(14,x,y,z,pitch)] = f14;
fout[f_mem(15,x,y,z,pitch)] = f15;
fout[f_mem(16,x,y,z,pitch)] = f16;
fout[f_mem(17,x,y,z,pitch)] = f17;
fout[f_mem(18,x,y,z,pitch)] = f18;
}
}
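//Refined-grid (LR) step with force accumulation. In this variant the streaming reads are
//commented out and every distribution is set to the constant 0.1f, so it appears to be a
//debugging/instrumentation version: it forms the momentum-exchange sums sumX/sumY/sumZ,
//writes the bounce-back reflected distributions, and then overwrites directions 1-3 in fout
//with the per-thread sums; the shared-memory force reduction at the end is disabled.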
__global__ void LR_d_hybABCD_force(float* fin, float* fout,
float omega, size_t pitch, float *FX, float *FY, float *FZ, int t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
// __shared__ int check[1];
// check[0] = 0;
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
syncthreads();
// if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2)
// {
// //dont do anything
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// }
// else{
// f0 = fin[j];
// f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
// f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
// f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
// f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
// f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
// f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
// f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
// if(z != ZLRDIM-1){
// f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
// }
// f1 = tex2D(texRef_f1C ,x-1+0.5f,y +0.5f+YLRDIM*(z));
// f3 = tex2D(texRef_f3C ,x+1+0.5f,y +0.5f+YLRDIM*(z));
// f5 = tex2D(texRef_f5C ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
// f6 = tex2D(texRef_f6C ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
// f7 = tex2D(texRef_f7C ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
// f8 = tex2D(texRef_f8C ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
// f15= tex2D(texRef_f15C,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
// f17= tex2D(texRef_f17C,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
// f10= tex2D(texRef_f10C,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
// f12= tex2D(texRef_f12C,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//
//
f1 = 0.1f;//fin[f_memLR(1 ,x,y,z,pitch)];
f3 = 0.1f;//fin[f_memLR(3 ,x,y,z,pitch)];
f2 = 0.1f;//fin[f_memLR(2 ,x,y,z,pitch)];
f5 = 0.1f;//fin[f_memLR(5 ,x,y,z,pitch)];
f6 = 0.1f;//fin[f_memLR(6 ,x,y,z,pitch)];
f4 = 0.1f;//fin[f_memLR(4 ,x,y,z,pitch)];
f7 = 0.1f;//fin[f_memLR(7 ,x,y,z,pitch)];
f8 = 0.1f;//fin[f_memLR(8 ,x,y,z,pitch)];
f9 = 0.1f;//fin[f_memLR(9 ,x,y,z,pitch)];
f10= 0.1f;//fin[f_memLR(10,x,y,z,pitch)];
f11= 0.1f;//fin[f_memLR(11,x,y,z,pitch)];
f12= 0.1f;//fin[f_memLR(12,x,y,z,pitch)];
f13= 0.1f;//fin[f_memLR(13,x,y,z,pitch)];
f14= 0.1f;//fin[f_memLR(14,x,y,z,pitch)];
f15= 0.1f;//fin[f_memLR(15,x,y,z,pitch)];
f16= 0.1f;//fin[f_memLR(16,x,y,z,pitch)];
f17= 0.1f;//fin[f_memLR(17,x,y,z,pitch)];
f18= 0.1f;//fin[f_memLR(18,x,y,z,pitch)];
syncthreads();
// if(im == 1 || im ==10){//BB
// if(im == 10){
// check[0] = 1;
float temp = 2.f*(f9+f10+f11);
temp += 2.f*(-f14-f15-f16);
sumX[threadIdx.x]=2.f*f1-2.f*f3 +2.f*f5 +2.f*f8 -2.f*f6 -2.f*f7 +2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4 +2.f*f5 -2.f*f8 +2.f*f6 -2.f*f7 +2.f*f11-2.f*f13+2.f*f16-2.f*f18;
//sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
//sumZ[threadIdx.x]=2.f*(f9+f10+f11-f14-f15-f16);
sumZ[threadIdx.x]=temp;//f9+f10+f11;//-f15-f16;
//sumZ[threadIdx.x]+=-f14-f15-f16;
// }
// else{
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// }
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
syncthreads();
fout[f_memLR(1 ,x,y,z,pitch)] = sumX[threadIdx.x];
fout[f_memLR(2 ,x,y,z,pitch)] = sumY[threadIdx.x];
fout[f_memLR(3 ,x,y,z,pitch)] = sumZ[threadIdx.x];
// }
// else{
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
//// if(im == 3)//DirichletWest
//// {
//// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//// }
//// else if(im == 13)//DirichletWest
//// {
//// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//// }
//// else if(im == 14)//DirichletWest
//// {
//// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//// }
// boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
//
//
// if(MODEL == "MRT")
// mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
// else if(MODEL == "BGK")
// bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
//
// fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
// fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
// fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
// fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
// fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
// fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
// fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
// fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
// fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
// fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
// fout[f_memLR(10,x,y,z,pitch)] = f10;
// fout[f_memLR(11,x,y,z,pitch)] = f11;
// fout[f_memLR(12,x,y,z,pitch)] = f12;
// fout[f_memLR(13,x,y,z,pitch)] = f13;
// fout[f_memLR(14,x,y,z,pitch)] = f14;
// fout[f_memLR(15,x,y,z,pitch)] = f15;
// fout[f_memLR(16,x,y,z,pitch)] = f16;
// fout[f_memLR(17,x,y,z,pitch)] = f17;
// fout[f_memLR(18,x,y,z,pitch)] = f18;
// }
// }//end else (not at edge of LR)
syncthreads();
// if(check[0] == 1 && t>=STARTF){
// //reduction for force
// int nTotalThreads = blockDim.x;
// while(nTotalThreads > 1){
// int halfPoint = (nTotalThreads >> 1);
// if(threadIdx.x < halfPoint){
// sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
// sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
// sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
// }
// syncthreads();
// nTotalThreads = halfPoint;
// }
// if(threadIdx.x == 0){
// atomicAdd(&FX[t],sumX[0]);
// atomicAdd(&FY[t],sumY[0]);
// atomicAdd(&FZ[t],sumZ[0]);
// }
// }
}
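//Refined-grid (LR) step with force accumulation: distributions needing only y/z offsets are
//streamed from global memory, while those needing an x offset come from the texture references.
//Solid nodes (im == 1 or 10) are bounced back, and im == 10 nodes accumulate the momentum
//exchange into sumX/sumY/sumZ; all other nodes collide with MRT or BGK. The per-block sums are
//reduced in shared memory and added to FX/FY/FZ[t] with atomicAdd once t >= STARTF.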
__global__ void LR_d_hybBACD_force(float* fin, float* fout,
float omega, size_t pitch, float *FX, float *FY, float *FZ, int t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2)
{
		//don't do anything
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1C ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3C ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5C ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6C ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7C ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8C ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15C,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17C,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10C,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12C,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
syncthreads();
if(check[0] == 1 && t>=STARTF){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t],sumX[0]);
atomicAdd(&FY[t],sumY[0]);
atomicAdd(&FZ[t],sumZ[0]);
}
}
}
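//Same refined-grid step as above but without force accumulation: bounce-back at solid nodes,
//MRT/BGK collision elsewhere, with the one-cell border of the LR block left untouched.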
__global__ void LR_d_hybABCD(float* fin, float* fout,
float omega, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2)
{
		//don't do anything
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1C ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3C ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5C ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6C ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7C ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8C ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15C,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17C,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10C,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12C,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
__global__ void LR_d_hybABCD2(float* fin, float* fout,
float omega, size_t pitch, int n)//pitch in elements
{
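//same as LR_d_hybABCD, except the halo width is a runtime parameter: nodes within
//n cells of any face of the LR block are skipped instead of a fixed one-cell rim.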
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n)
{
//don't do anything
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1C ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3C ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5C ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6C ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7C ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8C ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15C,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17C,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10C,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12C,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
__global__ void LR_d_hybABDC(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//the +0.5f added at the texture fetches below is because textures are stored voxel-centered while the grid here is vertex-centered
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed for z because z does not use texture interpolation
// int zminus = int(zcoord);
// int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < 2 || x > XLRDIM-3 || y < 2 || y > YLRDIM-3 || z < 2 || z > ZLRDIM-3)
{
//no interp
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
__global__ void LR_d_hybABDC2(float* fin, float* fout,
float omega, size_t pitch, float SF, int n)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//the +0.5f added at the texture fetches below is because textures are stored voxel-centered while the grid here is vertex-centered
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed for z because z does not use texture interpolation
// int zminus = int(zcoord);
// int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n)
{
//no interp
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
__global__ void LR_d_hybABDC_Interp(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//the +0.5f added at the texture fetches below is because textures are stored voxel-centered while the grid here is vertex-centered
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed for z because z does not use texture interpolation
int zminus = int(zcoord);
int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL || z < LRLEVEL || z > ZLRDIM-1-LRLEVEL)
{
//interpolate for the next time step from the coarse grid (the texRef_*B textures; note coarse textures are addressed with YDIM, not YLRDIM)
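//texture interpolation covers x and y; z is blended manually between the two
//neighboring coarse slices with weights (zplus-zcoord) and (zcoord-zminus).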
f0 = (zplus-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f2 = (zplus-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f4 = (zplus-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f9 = (zplus-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f11= (zplus-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f13= (zplus-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f14= (zplus-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f16= (zplus-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f18= (zplus-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f1 = (zplus-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f3 = (zplus-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f5 = (zplus-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f6 = (zplus-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f7 = (zplus-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f8 = (zplus-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f15= (zplus-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f17= (zplus-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f10= (zplus-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f12= (zplus-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
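//mrt_scale_cf presumably rescales the non-equilibrium part of the interpolated
//distributions by SF for the coarse-to-fine transfer; the commented block below is
//an older in-place version of the same scaling.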
mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF);
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
// f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
//// u = rho*u;
//// v = rho*v;
//// w = rho*w;
// float usqr = fma(u,u,fma(v,v,w*w));
//
// if(MODEL == "MRT")
// {
// float meq0 = rho;
// float meq1 = -11.f*rho+19.f*(u*u+v*v+w*w);
// float meq2 = -7.53968254f*(u*u+v*v+w*w);
// float meq3 = u;
// float meq4 = -0.66666667f*u;//qx_eq
// float meq5 = v;
// float meq6 = -0.66666667f*v;//qx_eq
// float meq7 = w;
// float meq8 = -0.66666667f*w;//qx_eq
// float meq9 = (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
// float meq11= (v*v-w*w);//pww_eq
// float meq13= u*v;//pxy_eq
// float meq14= v*w;//pyz_eq
// float meq15= u*w;//pxz_eq
//
////float feq0 = 0.052631579f*meq0+ -0.012531328f*meq1+ 0.047619048f*meq2 ;
////float feq1 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ (0.166666667f*u) ;
////float feq2 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + (0.166666667f*v) ;
////float feq3 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ -(0.166666667f*u) ;
////float feq4 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -(0.166666667f*v) ;
////float feq5 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq5)+ 0.025f*(meq4+meq6) ;
////float feq6 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq5)+ -0.025f*(meq4-meq6) ;
////float feq7 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq5)+ -0.025f*(meq4+meq6) ;
////float feq8 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq5)+ 0.025f*(meq4-meq6) ;
////float feq9 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + (0.166666667f*w);
////float feq10= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq7)+ 0.025f*(meq4+meq8) ;
////float feq11= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5+meq7)+ 0.025f*(meq6+meq8);
////float feq12= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq7)+ -0.025f*(meq4-meq8) ;
////float feq13= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + -0.1f*(meq5-meq7)+ -0.025f*(meq6-meq8);
////float feq14= 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -(0.166666667f*w);
////float feq15= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq7)+ 0.025f*(meq4-meq8) ;
////float feq16= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5-meq7)+ 0.025f*(meq6-meq8);
////float feq17= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq7)+ -0.025f*(meq4+meq8) ;
////float feq18= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ + -0.1f*(meq5+meq7)+ -0.025f*(meq6+meq8);
//
//float feq0 = 0.052631579f*meq0+ -0.012531328f*meq1+ 0.047619048f*meq2 ;
//float feq1 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ 0.1f*(meq3-meq4) ;
//float feq2 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*(meq5-meq6) ;
//float feq3 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ -0.1f*(meq3-meq4) ;
//float feq4 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*(meq5-meq6) ;
//float feq5 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq5)+ 0.025f*(meq4+meq6) ;
//float feq6 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq5)+ -0.025f*(meq4-meq6) ;
//float feq7 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq5)+ -0.025f*(meq4+meq6) ;
//float feq8 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq5)+ 0.025f*(meq4-meq6) ;
//float feq9 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*(meq7-meq8);
//float feq10= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq7)+ 0.025f*(meq4+meq8) ;
//float feq11= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5+meq7)+ 0.025f*(meq6+meq8);
//float feq12= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq7)+ -0.025f*(meq4-meq8) ;
//float feq13= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + -0.1f*(meq5-meq7)+ -0.025f*(meq6-meq8);
//float feq14= 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*(meq7-meq8);
//float feq15= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq7)+ 0.025f*(meq4-meq8) ;
//float feq16= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5-meq7)+ 0.025f*(meq6-meq8);
//float feq17= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq7)+ -0.025f*(meq4+meq8) ;
//float feq18= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ + -0.1f*(meq5+meq7)+ -0.025f*(meq6+meq8);
//
//feq1 += 0.055555556f*meq9;
//feq2 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq3 += 0.055555556f*meq9;
//feq4 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq5 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq6 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq7 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq8 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq9 += -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq10+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq11+= -0.055555556f*meq9 + 0.25f*meq14 ;
//feq12+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq13+= -0.055555556f*meq9 -0.25f*meq14 ;
//feq14+= -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq15+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq16+= -0.055555556f*meq9 + -0.25f*meq14 ;
//feq17+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq18+= -0.055555556f*meq9 + 0.25f*meq14 ;
//
//
//f0 =SF*f0 +(1.0f-SF)*feq0 ;
//f1 =SF*f1 +(1.0f-SF)*feq1 ;
//f2 =SF*f2 +(1.0f-SF)*feq2 ;
//f3 =SF*f3 +(1.0f-SF)*feq3 ;
//f4 =SF*f4 +(1.0f-SF)*feq4 ;
//f5 =SF*f5 +(1.0f-SF)*feq5 ;
//f6 =SF*f6 +(1.0f-SF)*feq6 ;
//f7 =SF*f7 +(1.0f-SF)*feq7 ;
//f8 =SF*f8 +(1.0f-SF)*feq8 ;
//f9 =SF*f9 +(1.0f-SF)*feq9 ;
//f10=SF*f10+(1.0f-SF)*feq10;
//f11=SF*f11+(1.0f-SF)*feq11;
//f12=SF*f12+(1.0f-SF)*feq12;
//f13=SF*f13+(1.0f-SF)*feq13;
//f14=SF*f14+(1.0f-SF)*feq14;
//f15=SF*f15+(1.0f-SF)*feq15;
//f16=SF*f16+(1.0f-SF)*feq16;
//f17=SF*f17+(1.0f-SF)*feq17;
//f18=SF*f18+(1.0f-SF)*feq18;
//
//
//
//
//
//
//// float m2 = -7.53968254f*(u*u+v*v+w*w);
//// //scale
//// f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
//// f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.01666666667f*(-u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.01666666667f*( u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.01666666667f*( u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.01666666667f*(-u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.01666666667f*(-u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.01666666667f*( u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
//// f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
//// f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.01666666667f*(-u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.01666666667f*( u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// }
// else
// {
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.3333333333f*(rho-1.5f*usqr));
// f1 =SF*f1 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =SF*f2 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =SF*f3 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =SF*f4 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =SF*f5 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =SF*f6 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =SF*f7 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =SF*f8 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =SF*f9 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=SF*f10+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=SF*f11+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=SF*f12+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=SF*f13+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=SF*f14+(1.0f-SF)*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=SF*f15+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=SF*f16+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=SF*f17+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=SF*f18+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
// }
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
__global__ void LR_d_hybBADC(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//the +0.5f added at the texture fetches below is because textures are stored voxel-centered while the grid here is vertex-centered
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed for z because z does not use texture interpolation
// int zminus = int(zcoord);
// int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < 2 || x > XLRDIM-3 || y < 2 || y > YLRDIM-3 || z < 2 || z > ZLRDIM-3)
{
//no interp
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
__global__ void LR_d_hybBADC2(float* fin, float* fout,
float omega, size_t pitch, float SF, int n)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//the +0.5f added at the texture fetches below is because textures are stored voxel-centered while the grid here is vertex-centered
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed for z because z does not use texture interpolation
// int zminus = int(zcoord);
// int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n)
{
//no interp
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
__global__ void LR_d_hybBADC_Interp(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//the +0.5f added at the texture fetches below is because textures are stored voxel-centered while the grid here is vertex-centered
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed for z because z does not use texture interpolation
int zminus = int(zcoord);
int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL || z < LRLEVEL || z > ZLRDIM-1-LRLEVEL)
{
//interpolate for the next time step from the coarse grid (the texRef_*A textures in this variant)
f0 = (zplus-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f2 = (zplus-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f4 = (zplus-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f9 = (zplus-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f11= (zplus-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f13= (zplus-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f14= (zplus-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f16= (zplus-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f18= (zplus-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f1 = (zplus-zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f3 = (zplus-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f5 = (zplus-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f6 = (zplus-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f7 = (zplus-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f8 = (zplus-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f15= (zplus-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f17= (zplus-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f10= (zplus-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f12= (zplus-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF);
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
// f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
//// u = rho*u;
//// v = rho*v;
//// w = rho*w;
// float usqr = fma(u,u,fma(v,v,w*w));
//
// if(MODEL == "MRT")
// {
// float meq0 = rho;
// float meq1 = -11.f*rho+19.f*(u*u+v*v+w*w);
// float meq2 = -7.53968254f*(u*u+v*v+w*w);
// float meq3 = u;
// float meq4 = -0.66666667f*u;//qx_eq
// float meq5 = v;
// float meq6 = -0.66666667f*v;//qx_eq
// float meq7 = w;
// float meq8 = -0.66666667f*w;//qx_eq
// float meq9 = (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
// float meq11= (v*v-w*w);//pww_eq
// float meq13= u*v;//pxy_eq
// float meq14= v*w;//pyz_eq
// float meq15= u*w;//pxz_eq
//
////float feq0 = 0.052631579f*meq0+ -0.012531328f*meq1+ 0.047619048f*meq2 ;
////float feq1 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ (0.166666667f*u) ;
////float feq2 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + (0.166666667f*v) ;
////float feq3 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ -(0.166666667f*u) ;
////float feq4 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -(0.166666667f*v) ;
////float feq5 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq5)+ 0.025f*(meq4+meq6) ;
////float feq6 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq5)+ -0.025f*(meq4-meq6) ;
////float feq7 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq5)+ -0.025f*(meq4+meq6) ;
////float feq8 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq5)+ 0.025f*(meq4-meq6) ;
////float feq9 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + (0.166666667f*w);
////float feq10= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq7)+ 0.025f*(meq4+meq8) ;
////float feq11= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5+meq7)+ 0.025f*(meq6+meq8);
////float feq12= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq7)+ -0.025f*(meq4-meq8) ;
////float feq13= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + -0.1f*(meq5-meq7)+ -0.025f*(meq6-meq8);
////float feq14= 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -(0.166666667f*w);
////float feq15= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq7)+ 0.025f*(meq4-meq8) ;
////float feq16= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5-meq7)+ 0.025f*(meq6-meq8);
////float feq17= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq7)+ -0.025f*(meq4+meq8) ;
////float feq18= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ + -0.1f*(meq5+meq7)+ -0.025f*(meq6+meq8);
//
//float feq0 = 0.052631579f*meq0+ -0.012531328f*meq1+ 0.047619048f*meq2 ;
//float feq1 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ 0.1f*(meq3-meq4) ;
//float feq2 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*(meq5-meq6) ;
//float feq3 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ -0.1f*(meq3-meq4) ;
//float feq4 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*(meq5-meq6) ;
//float feq5 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq5)+ 0.025f*(meq4+meq6) ;
//float feq6 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq5)+ -0.025f*(meq4-meq6) ;
//float feq7 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq5)+ -0.025f*(meq4+meq6) ;
//float feq8 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq5)+ 0.025f*(meq4-meq6) ;
//float feq9 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*meq7+ -0.1f*meq8;
//float feq10= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4 + 0.1f*meq7+ 0.025f*meq8;
//float feq11= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*meq5+ 0.025f*meq6+ 0.1f*meq7+ 0.025f*meq8;
//float feq12= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4 + 0.1f*meq7+ 0.025f*meq8;
//float feq13= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + -0.1f*meq5+ -0.025f*meq6+ 0.1f*meq7+ 0.025f*meq8;
//float feq14= 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*meq7+ 0.1f*meq8;
//float feq15= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4 + -0.1f*meq7+ -0.025f*meq8;
//float feq16= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*meq5+ 0.025f*meq6+ -0.1f*meq7+ -0.025f*meq8;
//float feq17= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4 + -0.1f*meq7+ -0.025f*meq8;
//float feq18= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ + -0.1f*meq5+ -0.025f*meq6+ -0.1f*meq7+ -0.025f*meq8;
//
//feq1 += 0.055555556f*meq9;
//feq2 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq3 += 0.055555556f*meq9;
//feq4 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq5 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq6 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq7 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq8 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq9 += -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq10+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq11+= -0.055555556f*meq9 + 0.25f*meq14 ;
//feq12+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq13+= -0.055555556f*meq9 -0.25f*meq14 ;
//feq14+= -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq15+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq16+= -0.055555556f*meq9 + -0.25f*meq14 ;
//feq17+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq18+= -0.055555556f*meq9 + 0.25f*meq14 ;
//
//
//f0 =SF*f0 +(1.0f-SF)*feq0 ;
//f1 =SF*f1 +(1.0f-SF)*feq1 ;
//f2 =SF*f2 +(1.0f-SF)*feq2 ;
//f3 =SF*f3 +(1.0f-SF)*feq3 ;
//f4 =SF*f4 +(1.0f-SF)*feq4 ;
//f5 =SF*f5 +(1.0f-SF)*feq5 ;
//f6 =SF*f6 +(1.0f-SF)*feq6 ;
//f7 =SF*f7 +(1.0f-SF)*feq7 ;
//f8 =SF*f8 +(1.0f-SF)*feq8 ;
//f9 =SF*f9 +(1.0f-SF)*feq9 ;
//f10=SF*f10+(1.0f-SF)*feq10;
//f11=SF*f11+(1.0f-SF)*feq11;
//f12=SF*f12+(1.0f-SF)*feq12;
//f13=SF*f13+(1.0f-SF)*feq13;
//f14=SF*f14+(1.0f-SF)*feq14;
//f15=SF*f15+(1.0f-SF)*feq15;
//f16=SF*f16+(1.0f-SF)*feq16;
//f17=SF*f17+(1.0f-SF)*feq17;
//f18=SF*f18+(1.0f-SF)*feq18;
//
//
//
//
//
//
//// float m2 = -7.53968254f*(u*u+v*v+w*w);
//// //scale
//// f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
//// f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.01666666667f*(-u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.01666666667f*( u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.01666666667f*( u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.01666666667f*(-u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.01666666667f*(-u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.01666666667f*( u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
//// f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
//// f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.01666666667f*(-u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.01666666667f*( u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// }
// else
// {
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.3333333333f*(rho-1.5f*usqr));
// f1 =SF*f1 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =SF*f2 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =SF*f3 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =SF*f4 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =SF*f5 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =SF*f6 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =SF*f7 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =SF*f8 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =SF*f9 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=SF*f10+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=SF*f11+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=SF*f12+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=SF*f13+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=SF*f14+(1.0f-SF)*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=SF*f15+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=SF*f16+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=SF*f17+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=SF*f18+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
// }
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
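//LR_d_ABCD: one collision+streaming step on the locally refined (LR) grid using
//linear memory only. Interior nodes (1..DIM-2 in each direction) pull their 19
//populations from the upwind neighbours in fin, then either bounce back
//(im==1 or im==10, solid nodes) or apply the MRT/BGK collision selected by MODEL,
//and write the result to fout; the outermost layer of the LR block is skipped.
//The letter suffix presumably encodes which A/B buffers the host binds for this substep.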
__global__ void LR_d_ABCD(float* fin, float* fout,
float omega, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
// if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3)
// im = -1;//not valid for extraction
// if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2)
// {
// im = -2;//not valid for second TS
// }
if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2)
{
      //don't do anything
}
else{
f0 = fin[j];
f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)];
f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)];
f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)];
f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f10= fin[f_memLR(10,x-1,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f12= fin[f_memLR(12,x+1,y ,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f15= fin[f_memLR(15,x-1,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
f17= fin[f_memLR(17,x+1,y ,z+1,pitch)];
f18= fin[f_memLR(18,x ,dmin(y+1,YLRDIM),dmin(z+1,ZLRDIM),pitch)];
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
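//LR_d_ABDC: LR step whose outer two layers are refilled from the coarse grid.
//Edge nodes interpolate all 19 populations from the coarse-grid "B" textures
//(x-y interpolation via the texture unit, manual linear interpolation in z) and
//rescale them for the fine grid with mrt_scale_cf(...,SF); interior nodes stream
//from fin and bounce back or collide exactly as in LR_d_ABCD.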
__global__ void LR_d_ABDC(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//+0.5f is because textures are stored in a voxel centered fashion. we need to change this to vertex centered.
float ycoord = LRY0+y*LRFACTOR;
  float zcoord = LRZ0+z*LRFACTOR;//don't need +0.5f because z does not use texture interpolation
int zminus = int(zcoord);
int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
// if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3)
// im = -1;//not valid for extraction
// if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2)
// {
// im = -2;//not valid for second TS
// }
if(x < 2 || x > XLRDIM-3 || y < 2 || y > YLRDIM-3 || z < 2 || z > ZLRDIM-3)
{
//interpolate for next time step. from B
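      //zcoord is expressed in coarse-grid units, so zminus/zplus bracket it and
      //(zplus-zcoord)/(zcoord-zminus) are the linear weights along z; the fractional
      //x/y coordinates rely on these textures being bound with linear filtering to
      //interpolate within each z-slice.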
f0 = (zplus-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f2 = (zplus-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f4 = (zplus-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f9 = (zplus-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f11= (zplus-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f13= (zplus-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f14= (zplus-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f16= (zplus-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f18= (zplus-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f1 = (zplus-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f3 = (zplus-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f5 = (zplus-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f6 = (zplus-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f7 = (zplus-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f8 = (zplus-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f15= (zplus-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f17= (zplus-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f10= (zplus-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f12= (zplus-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF);
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
// f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
// float usqr = fma(u,u,fma(v,v,w*w));
//
// if(MODEL == "MRT")
// {
// float m2 = -7.53968254f*(u*u+v*v+w*w);
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
// f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
// f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
// f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.01666666667f*(-u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
// f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.01666666667f*( u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
// f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.01666666667f*( u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
// f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.01666666667f*(-u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
// f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
// f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.01666666667f*(-u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.01666666667f*( u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
// f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
// f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.01666666667f*(-u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.01666666667f*( u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// }
// else
// {
//
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.3333333333f*(rho-1.5f*usqr));
// f1 =SF*f1 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =SF*f2 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =SF*f3 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =SF*f4 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =SF*f5 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =SF*f6 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =SF*f7 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =SF*f8 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =SF*f9 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=SF*f10+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=SF*f11+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
// f12=SF*f12+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=SF*f13+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
// f14=SF*f14+(1.0f-SF)*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=SF*f15+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=SF*f16+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=SF*f17+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=SF*f18+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
// }
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
else{
f0 = fin[j];
f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)];
f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)];
f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)];
f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f10= fin[f_memLR(10,x-1,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f12= fin[f_memLR(12,x+1,y ,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f15= fin[f_memLR(15,x-1,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
f17= fin[f_memLR(17,x+1,y ,z+1,pitch)];
//if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,dmin(y+1,YLRDIM),dmin(z+1,ZLRDIM),pitch)];
//}
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
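//LR_d_BACD: same interior-only LR update as LR_d_ABCD (linear-memory streaming,
//bounce-back or MRT/BGK collision); the different letter order presumably just
//reflects which A/B buffer pairing the host passes in for this substep.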
__global__ void LR_d_BACD(float* fin, float* fout,
float omega, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
// if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3)
// im = -1;//not valid for extraction
// if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2)
// {
// im = -2;//not valid for second TS
// }
if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2)
{
      //don't do anything
}
else{
f0 = fin[j];
f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)];
f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)];
f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)];
f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f10= fin[f_memLR(10,x-1,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f12= fin[f_memLR(12,x+1,y ,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f15= fin[f_memLR(15,x-1,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
f17= fin[f_memLR(17,x+1,y ,z+1,pitch)];
f18= fin[f_memLR(18,x ,dmin(y+1,YLRDIM),dmin(z+1,ZLRDIM),pitch)];
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
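//LR_d_BADC: counterpart of LR_d_ABDC for the other half of the ping-pong: edge
//nodes of the LR block are interpolated from the coarse-grid "A" textures and
//rescaled with mrt_scale_cf(...,SF), while interior nodes stream from fin and
//bounce back or collide as usual.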
__global__ void LR_d_BADC(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int zminus = int(zcoord);
int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
// if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3)
// im = -1;//not valid for extraction
// if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2)
// {
// im = -2;//not valid for second TS
// }
if(x < 2 || x > XLRDIM-3 || y < 2 || y > YLRDIM-3 || z < 2 || z > ZLRDIM-3)
{
//interpolate for next time step. from A
f0 = (zplus-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f2 = (zplus-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f4 = (zplus-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f9 = (zplus-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f11= (zplus-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f13= (zplus-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f14= (zplus-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f16= (zplus-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f18= (zplus-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f1 = (zplus-zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f3 = (zplus-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f5 = (zplus-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f6 = (zplus-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f7 = (zplus-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f8 = (zplus-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f15= (zplus-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f17= (zplus-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f10= (zplus-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f12= (zplus-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF);
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
// f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
// float usqr = fma(u,u,fma(v,v,w*w));
//
// if(MODEL == "MRT")
// {
// float m2 = -7.53968254f*(u*u+v*v+w*w);
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
// f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
// f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
// f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.01666666667f*(-u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
// f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.01666666667f*( u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
// f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.01666666667f*( u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
// f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.01666666667f*(-u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
// f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
// f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.01666666667f*(-u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.01666666667f*( u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
// f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
// f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.01666666667f*(-u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.01666666667f*( u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// }
// else
// {
//
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.3333333333f*(rho-1.5f*usqr));
// f1 =SF*f1 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =SF*f2 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =SF*f3 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =SF*f4 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =SF*f5 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =SF*f6 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =SF*f7 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =SF*f8 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =SF*f9 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=SF*f10+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=SF*f11+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
// f12=SF*f12+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=SF*f13+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
// f14=SF*f14+(1.0f-SF)*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=SF*f15+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=SF*f16+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=SF*f17+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=SF*f18+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
// }
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
else{
f0 = fin[j];
f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)];
f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)];
f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)];
f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f10= fin[f_memLR(10,x-1,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f12= fin[f_memLR(12,x+1,y ,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f15= fin[f_memLR(15,x-1,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
f17= fin[f_memLR(17,x+1,y ,z+1,pitch)];
//if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,dmin(y+1,YLRDIM),dmin(z+1,ZLRDIM),pitch)];
//}
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
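//mrt_d_hybAB_force: coarse-grid step with hybrid streaming: populations whose
//lattice velocity has no x component are pulled from fin in linear memory, the
//remaining ten are fetched from the "A" textures. Solid nodes (im==1 or im==10)
//bounce back, and im==10 nodes also accumulate the momentum-exchange force,
//which is reduced per block in shared memory and atomically added to
//FX/FY/FZ[t] once t >= STARTF. Fluid nodes apply boundaries() and then the
//MRT/BGK collision selected by MODEL.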
__global__ void mrt_d_hybAB_force(float* fin, float* fout,
float omega, size_t pitch, float *FX, float *FY, float *FZ, int t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;//;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
  __syncthreads();
// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)))
// {
// }
// else{
f0 = fin[j];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
f14= fin[f_mem(14,x ,y ,z+1,pitch)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
if(z != ZDIM-1){
f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1A ,x-1+0.5f,y +0.5f+YDIM*(z));
f3 = tex2D(texRef_f3A ,x+1+0.5f,y +0.5f+YDIM*(z));
f5 = tex2D(texRef_f5A ,x-1+0.5f,y-1+0.5f+YDIM*(z));
f6 = tex2D(texRef_f6A ,x+1+0.5f,y-1+0.5f+YDIM*(z));
f7 = tex2D(texRef_f7A ,x+1+0.5f,y+1+0.5f+YDIM*(z));
f8 = tex2D(texRef_f8A ,x-1+0.5f,y+1+0.5f+YDIM*(z));
f15= tex2D(texRef_f15A,x-1+0.5f,y +0.5f+YDIM*(z+1));
f17= tex2D(texRef_f17A,x+1+0.5f,y +0.5f+YDIM*(z+1));
f10= tex2D(texRef_f10A,x-1+0.5f,y +0.5f+YDIM*(z-1));
f12= tex2D(texRef_f12A,x+1+0.5f,y +0.5f+YDIM*(z-1));
int im = ImageFcn(x,y,z);
if(im == 1 || im == 10){//BB
if(im == 10){
check[0] = 1;
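      //momentum-exchange force at this bounced-back node: each incoming population
      //f_i hands 2*f_i*e_i of momentum to the wall, so the sums below are
      //2*sum_i(e_ix*f_i), 2*sum_i(e_iy*f_i) and 2*sum_i(e_iz*f_i), with signs
      //following the streaming offsets used above.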
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
fout[j+pitch*YDIM*ZDIM*9 ] = f14;
fout[j+pitch*YDIM*ZDIM*10] = f17;
fout[j+pitch*YDIM*ZDIM*11] = f18;
fout[j+pitch*YDIM*ZDIM*12] = f15;
fout[j+pitch*YDIM*ZDIM*13] = f16;
fout[j+pitch*YDIM*ZDIM*14] = f9 ;
fout[j+pitch*YDIM*ZDIM*15] = f12;
fout[j+pitch*YDIM*ZDIM*16] = f13;
fout[j+pitch*YDIM*ZDIM*17] = f10;
fout[j+pitch*YDIM*ZDIM*18] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
fout[f_mem(10,x,y,z,pitch)] = f10;
fout[f_mem(11,x,y,z,pitch)] = f11;
fout[f_mem(12,x,y,z,pitch)] = f12;
fout[f_mem(13,x,y,z,pitch)] = f13;
fout[f_mem(14,x,y,z,pitch)] = f14;
fout[f_mem(15,x,y,z,pitch)] = f15;
fout[f_mem(16,x,y,z,pitch)] = f16;
fout[f_mem(17,x,y,z,pitch)] = f17;
fout[f_mem(18,x,y,z,pitch)] = f18;
}
  __syncthreads();
if(check[0] == 1 && t>=STARTF){
//reduction for force
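    //block-wide tree reduction by repeated halving; note this assumes blockDim.x
    //is a power of two, otherwise the entry just below each halfPoint is dropped.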
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
    __syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t],sumX[0]);
atomicAdd(&FY[t],sumY[0]);
atomicAdd(&FZ[t],sumZ[0]);
}
}
// }
}
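//mrt_d_hybBA_force: identical to mrt_d_hybAB_force except that the x-direction
//populations are fetched from the "B" texture set (the other half of the A/B
//ping-pong between time steps).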
__global__ void mrt_d_hybBA_force(float* fin, float* fout,
float omega, size_t pitch, float *FX, float *FY, float *FZ, int t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;//;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
  __syncthreads();
// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)))
// {
// }
// else{
f0 = fin[j];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
f14= fin[f_mem(14,x ,y ,z+1,pitch)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
if(z != ZDIM-1){
f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1B ,x-1+0.5f,y +0.5f+YDIM*(z));
f3 = tex2D(texRef_f3B ,x+1+0.5f,y +0.5f+YDIM*(z));
f5 = tex2D(texRef_f5B ,x-1+0.5f,y-1+0.5f+YDIM*(z));
f6 = tex2D(texRef_f6B ,x+1+0.5f,y-1+0.5f+YDIM*(z));
f7 = tex2D(texRef_f7B ,x+1+0.5f,y+1+0.5f+YDIM*(z));
f8 = tex2D(texRef_f8B ,x-1+0.5f,y+1+0.5f+YDIM*(z));
f15= tex2D(texRef_f15B,x-1+0.5f,y +0.5f+YDIM*(z+1));
f17= tex2D(texRef_f17B,x+1+0.5f,y +0.5f+YDIM*(z+1));
f10= tex2D(texRef_f10B,x-1+0.5f,y +0.5f+YDIM*(z-1));
f12= tex2D(texRef_f12B,x+1+0.5f,y +0.5f+YDIM*(z-1));
int im = ImageFcn(x,y,z);
if(im == 1 || im == 10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
fout[j+pitch*YDIM*ZDIM*9 ] = f14;
fout[j+pitch*YDIM*ZDIM*10] = f17;
fout[j+pitch*YDIM*ZDIM*11] = f18;
fout[j+pitch*YDIM*ZDIM*12] = f15;
fout[j+pitch*YDIM*ZDIM*13] = f16;
fout[j+pitch*YDIM*ZDIM*14] = f9 ;
fout[j+pitch*YDIM*ZDIM*15] = f12;
fout[j+pitch*YDIM*ZDIM*16] = f13;
fout[j+pitch*YDIM*ZDIM*17] = f10;
fout[j+pitch*YDIM*ZDIM*18] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
fout[f_mem(10,x,y,z,pitch)] = f10;
fout[f_mem(11,x,y,z,pitch)] = f11;
fout[f_mem(12,x,y,z,pitch)] = f12;
fout[f_mem(13,x,y,z,pitch)] = f13;
fout[f_mem(14,x,y,z,pitch)] = f14;
fout[f_mem(15,x,y,z,pitch)] = f15;
fout[f_mem(16,x,y,z,pitch)] = f16;
fout[f_mem(17,x,y,z,pitch)] = f17;
fout[f_mem(18,x,y,z,pitch)] = f18;
}
  __syncthreads();
if(check[0] == 1 && t>=STARTF){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
    __syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t],sumX[0]);
atomicAdd(&FY[t],sumY[0]);
atomicAdd(&FZ[t],sumZ[0]);
}
}
// }
}
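//mrt_d_hybAB: the same hybrid streaming, bounce-back and MRT/BGK collision as
//mrt_d_hybAB_force, but without the momentum-exchange force accumulation
//(that code is retained below only in commented-out form).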
__global__ void mrt_d_hybAB(float* fin, float* fout,
float omega, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;//;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
// __shared__ int check[1];
// check[0] = 0;
// syncthreads();
// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)))
// {
// }
// else{
f0 = fin[j];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
f14= fin[f_mem(14,x ,y ,z+1,pitch)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
if(z != ZDIM-1){
f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1A ,x-1+0.5f,y +0.5f+YDIM*(z));
f3 = tex2D(texRef_f3A ,x+1+0.5f,y +0.5f+YDIM*(z));
f5 = tex2D(texRef_f5A ,x-1+0.5f,y-1+0.5f+YDIM*(z));
f6 = tex2D(texRef_f6A ,x+1+0.5f,y-1+0.5f+YDIM*(z));
f7 = tex2D(texRef_f7A ,x+1+0.5f,y+1+0.5f+YDIM*(z));
f8 = tex2D(texRef_f8A ,x-1+0.5f,y+1+0.5f+YDIM*(z));
f15= tex2D(texRef_f15A,x-1+0.5f,y +0.5f+YDIM*(z+1));
f17= tex2D(texRef_f17A,x+1+0.5f,y +0.5f+YDIM*(z+1));
f10= tex2D(texRef_f10A,x-1+0.5f,y +0.5f+YDIM*(z-1));
f12= tex2D(texRef_f12A,x+1+0.5f,y +0.5f+YDIM*(z-1));
int im = ImageFcn(x,y,z);
if(im == 1 || im == 10){//BB
// if(im == 10){
// check[0] = 1;
// sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
// sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
// sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
// }
// else{
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// }
fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
fout[j+pitch*YDIM*ZDIM*9 ] = f14;
fout[j+pitch*YDIM*ZDIM*10] = f17;
fout[j+pitch*YDIM*ZDIM*11] = f18;
fout[j+pitch*YDIM*ZDIM*12] = f15;
fout[j+pitch*YDIM*ZDIM*13] = f16;
fout[j+pitch*YDIM*ZDIM*14] = f9 ;
fout[j+pitch*YDIM*ZDIM*15] = f12;
fout[j+pitch*YDIM*ZDIM*16] = f13;
fout[j+pitch*YDIM*ZDIM*17] = f10;
fout[j+pitch*YDIM*ZDIM*18] = f11;
}
else{
// syncthreads();
// check[0] = 1;
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// if(im == 0){
// }
// else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
//// if(im == 15)//DirichletNorth
//// {
//// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//// }
//// if(im == 16)//DirichletSouth
//// {
//// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//// }
// if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
//// }
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
fout[f_mem(10,x,y,z,pitch)] = f10;
fout[f_mem(11,x,y,z,pitch)] = f11;
fout[f_mem(12,x,y,z,pitch)] = f12;
fout[f_mem(13,x,y,z,pitch)] = f13;
fout[f_mem(14,x,y,z,pitch)] = f14;
fout[f_mem(15,x,y,z,pitch)] = f15;
fout[f_mem(16,x,y,z,pitch)] = f16;
fout[f_mem(17,x,y,z,pitch)] = f17;
fout[f_mem(18,x,y,z,pitch)] = f18;
}
// syncthreads();
// if(check[0] == 1 && t>=STARTF){
// //reduction for force
// int nTotalThreads = blockDim.x;
// while(nTotalThreads > 1){
// int halfPoint = (nTotalThreads >> 1);
// if(threadIdx.x < halfPoint){
// sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
// sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
// sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
// }
// syncthreads();
// nTotalThreads = halfPoint;
// }
// if(threadIdx.x == 0){
// atomicAdd(&FX[t],sumX[0]);
// atomicAdd(&FY[t],sumY[0]);
// atomicAdd(&FZ[t],sumZ[0]);
// }
// }
// }
}
//{
// int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
// int y = threadIdx.y+blockIdx.y*blockDim.y;//;
// int z = threadIdx.z+blockIdx.z*blockDim.z;
// int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
// int im = ImageFcn(x,y,z);
// float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
//
//
//// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
//// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
//// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
//// (x>XDIM-1)))
//// {
//// }
//// else{
//
// f0 = fin[j];
//// f2 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)];
//// f4 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)];
//// f9 = fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)];
//// f11= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)];
//// f13= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)];
//// f14= fin[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)];
//// f16= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];
//// f18= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM),pitch)];
// f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
// f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
// f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
// f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
// f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
// f14= fin[f_mem(14,x ,y ,z+1,pitch)];
// f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
// if(z != ZDIM-1){
// f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
// }
// f1 = tex2D(texRef_f1A ,x-1+0.5f,y +0.5f+YDIM*(z));
// f3 = tex2D(texRef_f3A ,x+1+0.5f,y +0.5f+YDIM*(z));
// f5 = tex2D(texRef_f5A ,x-1+0.5f,y-1+0.5f+YDIM*(z));
// f6 = tex2D(texRef_f6A ,x+1+0.5f,y-1+0.5f+YDIM*(z));
// f7 = tex2D(texRef_f7A ,x+1+0.5f,y+1+0.5f+YDIM*(z));
// f8 = tex2D(texRef_f8A ,x-1+0.5f,y+1+0.5f+YDIM*(z));
// f15= tex2D(texRef_f15A,x-1+0.5f,y +0.5f+YDIM*(z+1));
// f17= tex2D(texRef_f17A,x+1+0.5f,y +0.5f+YDIM*(z+1));
// f10= tex2D(texRef_f10A,x-1+0.5f,y +0.5f+YDIM*(z-1));
// f12= tex2D(texRef_f12A,x+1+0.5f,y +0.5f+YDIM*(z-1));
//
//
// if(im == 1 || im ==10){//BB
// fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
// fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
// fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
// fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
// fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
// fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
// fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
// fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
// fout[j+pitch*YDIM*ZDIM*9 ] = f14;
// fout[j+pitch*YDIM*ZDIM*10] = f17;
// fout[j+pitch*YDIM*ZDIM*11] = f18;
// fout[j+pitch*YDIM*ZDIM*12] = f15;
// fout[j+pitch*YDIM*ZDIM*13] = f16;
// fout[j+pitch*YDIM*ZDIM*14] = f9 ;
// fout[j+pitch*YDIM*ZDIM*15] = f12;
// fout[j+pitch*YDIM*ZDIM*16] = f13;
// fout[j+pitch*YDIM*ZDIM*17] = f10;
// fout[j+pitch*YDIM*ZDIM*18] = f11;
//
//// fout[f_mem(1 ,x,y,z,pitch)] = f3 ;
//// fout[f_mem(2 ,x,y,z,pitch)] = f4 ;
//// fout[f_mem(3 ,x,y,z,pitch)] = f1 ;
//// fout[f_mem(4 ,x,y,z,pitch)] = f2 ;
//// fout[f_mem(5 ,x,y,z,pitch)] = f7 ;
//// fout[f_mem(6 ,x,y,z,pitch)] = f8 ;
//// fout[f_mem(7 ,x,y,z,pitch)] = f5 ;
//// fout[f_mem(8 ,x,y,z,pitch)] = f6 ;
//// fout[f_mem(9 ,x,y,z,pitch)] = f14;
//// fout[f_mem(10,x,y,z,pitch)] = f17;
//// fout[f_mem(11,x,y,z,pitch)] = f18;
//// fout[f_mem(12,x,y,z,pitch)] = f15;
//// fout[f_mem(13,x,y,z,pitch)] = f16;
//// fout[f_mem(14,x,y,z,pitch)] = f9 ;
//// fout[f_mem(15,x,y,z,pitch)] = f12;
//// fout[f_mem(16,x,y,z,pitch)] = f13;
//// fout[f_mem(17,x,y,z,pitch)] = f10;
//// fout[f_mem(18,x,y,z,pitch)] = f11;
// }
// else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
//
// if(MODEL == "MRT")
// mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
// else if(MODEL == "BGK")
// bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
//
//
// fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
// fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
// fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
// fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
// fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
// fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
// fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
// fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
// fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
// fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
// fout[f_mem(10,x,y,z,pitch)] = f10;
// fout[f_mem(11,x,y,z,pitch)] = f11;
// fout[f_mem(12,x,y,z,pitch)] = f12;
// fout[f_mem(13,x,y,z,pitch)] = f13;
// fout[f_mem(14,x,y,z,pitch)] = f14;
// fout[f_mem(15,x,y,z,pitch)] = f15;
// fout[f_mem(16,x,y,z,pitch)] = f16;
// fout[f_mem(17,x,y,z,pitch)] = f17;
// fout[f_mem(18,x,y,z,pitch)] = f18;
// }
//// }
//}
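//mrt_d_hybBA: force-free hybrid step reading the "B" textures; the counterpart
//of mrt_d_hybAB for the other half of the ping-pong.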
__global__ void mrt_d_hybBA(float* fin, float* fout,
float omega, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)))
// {
// }
// else{
f0 = fin[j];
// f2 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)];
// f4 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)];
// f9 = fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)];
// f11= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)];
// f13= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)];
// f14= fin[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)];
// f16= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];
// f18= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM),pitch)];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
f14= fin[f_mem(14,x ,y ,z+1,pitch)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
if(z != ZDIM-1){
f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1B ,x-1+0.5f,y +0.5f+YDIM*(z));
f3 = tex2D(texRef_f3B ,x+1+0.5f,y +0.5f+YDIM*(z));
f5 = tex2D(texRef_f5B ,x-1+0.5f,y-1+0.5f+YDIM*(z));
f6 = tex2D(texRef_f6B ,x+1+0.5f,y-1+0.5f+YDIM*(z));
f7 = tex2D(texRef_f7B ,x+1+0.5f,y+1+0.5f+YDIM*(z));
f8 = tex2D(texRef_f8B ,x-1+0.5f,y+1+0.5f+YDIM*(z));
f15= tex2D(texRef_f15B,x-1+0.5f,y +0.5f+YDIM*(z+1));
f17= tex2D(texRef_f17B,x+1+0.5f,y +0.5f+YDIM*(z+1));
f10= tex2D(texRef_f10B,x-1+0.5f,y +0.5f+YDIM*(z-1));
f12= tex2D(texRef_f12B,x+1+0.5f,y +0.5f+YDIM*(z-1));
int im = ImageFcn(x,y,z);
if(im == 1 || im ==10){//BB
fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
fout[j+pitch*YDIM*ZDIM*9 ] = f14;
fout[j+pitch*YDIM*ZDIM*10] = f17;
fout[j+pitch*YDIM*ZDIM*11] = f18;
fout[j+pitch*YDIM*ZDIM*12] = f15;
fout[j+pitch*YDIM*ZDIM*13] = f16;
fout[j+pitch*YDIM*ZDIM*14] = f9 ;
fout[j+pitch*YDIM*ZDIM*15] = f12;
fout[j+pitch*YDIM*ZDIM*16] = f13;
fout[j+pitch*YDIM*ZDIM*17] = f10;
fout[j+pitch*YDIM*ZDIM*18] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
fout[f_mem(10,x,y,z,pitch)] = f10;
fout[f_mem(11,x,y,z,pitch)] = f11;
fout[f_mem(12,x,y,z,pitch)] = f12;
fout[f_mem(13,x,y,z,pitch)] = f13;
fout[f_mem(14,x,y,z,pitch)] = f14;
fout[f_mem(15,x,y,z,pitch)] = f15;
fout[f_mem(16,x,y,z,pitch)] = f16;
fout[f_mem(17,x,y,z,pitch)] = f17;
fout[f_mem(18,x,y,z,pitch)] = f18;
}
// }
}
__global__ void mrt_d_single(float* fA, float* fB,
float omega, size_t pitch)//pitch in elements
{
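	// single-array step: pull-stream from fA, apply boundaries and collision, write to fB.
	// Nodes covered by the refined patch (when REFINEMENT == "YES") and the pitch padding
	// (x > XDIM-1) are skipped.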
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
&& y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
(x>XDIM-1)){
}
else{
f0 = fA[j];
// if(x != 0){
// f1 = fA[f_mem(1 ,x-1,y ,z ,pitch)];
// if(y != 0){
// f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch)];
// }
// if(y != YDIM-1){
// f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch)];
// }
// if(z != 0){
// f10= fA[f_mem(10,x-1,y ,z-1,pitch)];
// }
// if(z != ZDIM-1){
// f15= fA[f_mem(15,x-1,y ,z+1,pitch)];
// }
// }
//
// if(x != XDIM-1){
// f3 = fA[f_mem(3 ,x+1,y ,z ,pitch)];
// if(y != 0){
// f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch)];
// }
// if(y != YDIM-1){
// f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch)];
// }
// if(z != 0){
// f12= fA[f_mem(12,x+1,y ,z-1,pitch)];
// }
// if(z != ZDIM-1){
// f17= fA[f_mem(17,x+1,y ,z+1,pitch)];
// }
// }
//
// if(y != 0){
// f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)];
// if(z != 0){
// f11= fA[f_mem(11,x ,y-1,z-1,pitch)];
// }
// if(z != ZDIM-1){
// f16= fA[f_mem(16,x ,y-1,z+1,pitch)];
// }
// }
//
// if(y != YDIM-1){
// f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)];
// if(z != 0){
// f13= fA[f_mem(13,x ,y+1,z-1,pitch)];
// }
// if(z != ZDIM-1){
// f18= fA[f_mem(18,x ,y+1,z+1,pitch)];
// }
// }
//
// if(z != 0){
// f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)];
// }
// if(z != ZDIM-1){
// f14= fA[f_mem(14,x ,y ,z+1,pitch)];
// }
// int a = (x+y*pitch+z*YDIM*pitch)+pitch*YDIM*ZDIM;
// f1 = fA[a-1];//fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch)];
// a += pitch*YDIM*ZDIM;//+1-pitch;
// f2 = fA[a-pitch];//fA[f_me;//m(2 ,x ,dmax(y-1) ,z ,pitch)];
// a += pitch*YDIM*ZDIM;//+1+pitch;
// f3 = fA[a+1];//fA[f_me;//m(3 ,dmin(x+1,XDIM),y ,z ,pitch)];
// a += pitch*YDIM*ZDIM;//-1+pitch;
// f4 = fA[a+pitch];//fA[f_me;//m(4 ,x ,dmin(y+1,YDIM),z ,pitch)];
// a += pitch*YDIM*ZDIM;//-1-2*pitch;
// f5 = fA[a-1-pitch];//fA[f_me;//m(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch)];
// a += pitch*YDIM*ZDIM;//+2;
// f6 = fA[a+1-pitch];//fA[f_me;//m(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch)];
// a += pitch*YDIM*ZDIM;//+2*pitch;
// f7 = fA[a+1+pitch];//fA[f_me;//m(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch)];
// a += pitch*YDIM*ZDIM;//-2;
// f8 = fA[a-1+pitch];//fA[f_me;//m(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch)];
// a += pitch*YDIM*ZDIM;//+1-pitch-pitch*YDIM;
// f9 = fA[a];//fA[f_me;//m(9 ,x ,y ,dmax(z-1) ,pitch)];
// a += pitch*YDIM*ZDIM;//-1;
// f10= fA[a-1];//fA[f_me;//m(10,dmax(x-1) ,y ,dmax(z-1) ,pitch)];
// a += pitch*YDIM*ZDIM;//+1-pitch;
// f11= fA[a-pitch];//fA[f_me;//m(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)];
// a += pitch*YDIM*ZDIM;//+1+pitch;
// f12= fA[a+1];//fA[f_me;//m(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch)];
// a += pitch*YDIM*ZDIM;//-1+pitch;
// f13= fA[a+pitch];//fA[f_me;//m(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)];
// if(z == ZDIM-1){
// a += pitch*YDIM*ZDIM;//-pitch+pitch*YDIM;
// f14= fA[a+YDIM*pitch];//fA[f_me;//m(14,x ,y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//-1;
// f15= fA[a-1+YDIM*pitch];//fA[f_me;//m(15,dmax(x-1) ,y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//+1-pitch;
// f16= fA[a-pitch+YDIM*pitch];//fA[f_me;//m(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//+1+pitch;
// f17= fA[a+1+YDIM*pitch];//fA[f_me;//m(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//-1;//+pitch;
// f18= fA[a+pitch+YDIM*pitch];//fA[f_me;//m(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM),pitch)];
// }
// else{
// a += pitch*YDIM*ZDIM;//-pitch+2*pitch*YDIM;
// f14= fA[a-YDIM*pitch];//fA[f_me;//m(14,x ,y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//-1;
// f15= fA[a-1-YDIM*pitch];//fA[f_me;//m(15,dmax(x-1) ,y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//+1-pitch;
// f16= fA[a-pitch-YDIM*pitch];//fA[f_me;//m(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//+1+pitch;
// f17= fA[a+1-YDIM*pitch];//fA[f_me;//m(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//-1+pitch;
// f18= fA[a+pitch-YDIM*pitch];//fA[f_me;//m(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM),pitch)];
// }
// int a = f_mem(1,x,y,z,pitch);
// f1 = fA[a];
// f3 = fA[a];
// f2 = fA[a];
// f5 = fA[a];
// f6 = fA[a];
// f4 = fA[a];
// f7 = fA[a];
// f8 = fA[a];
// f9 = fA[a];
// f10= fA[a];
// f11= fA[a];
// f12= fA[a];
// f13= fA[a];
// f14= fA[a];
// f15= fA[a];
// f16= fA[a];
// f17= fA[a];
// f18= fA[a];
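	// pull streaming: read every moving distribution from its upstream neighbour;
	// dmax/dmin clamp the neighbour indices at the domain faces (edge nodes are
	// corrected afterwards by bounce-back or the boundary conditions)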
f1 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch)];
f3 = fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch)];
f2 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)];
f5 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch)];
f6 = fA[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch)];
f4 = fA[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)];
f7 = fA[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch)];
f8 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch)];
f9 = fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)];
f10= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch)];
f11= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)];
f12= fA[f_mem(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch)];
f13= fA[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)];
f14= fA[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)];
f15= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,ZDIM),pitch)];
f16= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];
f17= fA[f_mem(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM),pitch)];
f18= fA[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM),pitch)];
if(im == 1 || im ==10){//BB
fB[f_mem(1 ,x,y,z,pitch)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch)] = f14;
fB[f_mem(10,x,y,z,pitch)] = f17;
fB[f_mem(11,x,y,z,pitch)] = f18;
fB[f_mem(12,x,y,z,pitch)] = f15;
fB[f_mem(13,x,y,z,pitch)] = f16;
fB[f_mem(14,x,y,z,pitch)] = f9 ;
fB[f_mem(15,x,y,z,pitch)] = f12;
fB[f_mem(16,x,y,z,pitch)] = f13;
fB[f_mem(17,x,y,z,pitch)] = f10;
fB[f_mem(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch)] = f9 ;
fB[f_mem(10,x,y,z,pitch)] = f10;
fB[f_mem(11,x,y,z,pitch)] = f11;
fB[f_mem(12,x,y,z,pitch)] = f12;
fB[f_mem(13,x,y,z,pitch)] = f13;
fB[f_mem(14,x,y,z,pitch)] = f14;
fB[f_mem(15,x,y,z,pitch)] = f15;
fB[f_mem(16,x,y,z,pitch)] = f16;
fB[f_mem(17,x,y,z,pitch)] = f17;
fB[f_mem(18,x,y,z,pitch)] = f18;
}
}
}
__global__ void initialize_single(float *f, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
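	// D3Q19 equilibrium: feq_i = w_i*(rho + 3(e_i.u) + 4.5(e_i.u)^2 - 1.5*u^2)
	// with weights 1/3 (rest), 1/18 (axis) and 1/36 (diagonal directions)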
f[j+0 *pitch*YDIM*ZDIM]= 1.0f/3.0f*(rho-1.5f*usqr);
f[j+1 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+2 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+3 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+4 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+5 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f[j+6 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f[j+7 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f[j+8 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f[j+9 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+10*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
	f[j+11*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f[j+12*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
	f[j+13*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f[j+14*pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+15*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f[j+16*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f[j+17*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f[j+18*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
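	// the values above are immediately overwritten below with a uniform rest-state
	// distribution (constant weights that sum to 1)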
float feq0 = 0.1904761791f*rho;
float feq1 = 0.1031746045f*rho;
float feq2 = 0.1031746045f*rho;
float feq3 = 0.1031746045f*rho;
float feq4 = 0.1031746045f*rho;
float feq5 = 0.0158730149f*rho;
float feq6 = 0.0158730149f*rho;
float feq7 = 0.0158730149f*rho;
float feq8 = 0.0158730149f*rho;
float feq9 = 0.1031746045f*rho;
float feq10= 0.0158730149f*rho;
float feq11= 0.0158730149f*rho;
float feq12= 0.0158730149f*rho;
float feq13= 0.0158730149f*rho;
float feq14= 0.1031746045f*rho;
float feq15= 0.0158730149f*rho;
float feq16= 0.0158730149f*rho;
float feq17= 0.0158730149f*rho;
float feq18= 0.0158730149f*rho;
	f[j+0 *pitch*YDIM*ZDIM]= feq0 ;
	f[j+1 *pitch*YDIM*ZDIM]= feq1 ;
	f[j+2 *pitch*YDIM*ZDIM]= feq2 ;
	f[j+3 *pitch*YDIM*ZDIM]= feq3 ;
	f[j+4 *pitch*YDIM*ZDIM]= feq4 ;
	f[j+5 *pitch*YDIM*ZDIM]= feq5 ;
	f[j+6 *pitch*YDIM*ZDIM]= feq6 ;
	f[j+7 *pitch*YDIM*ZDIM]= feq7 ;
	f[j+8 *pitch*YDIM*ZDIM]= feq8 ;
	f[j+9 *pitch*YDIM*ZDIM]= feq9 ;
	f[j+10*pitch*YDIM*ZDIM]= feq10;
	f[j+11*pitch*YDIM*ZDIM]= feq11;
	f[j+12*pitch*YDIM*ZDIM]= feq12;
	f[j+13*pitch*YDIM*ZDIM]= feq13;
	f[j+14*pitch*YDIM*ZDIM]= feq14;
	f[j+15*pitch*YDIM*ZDIM]= feq15;
	f[j+16*pitch*YDIM*ZDIM]= feq16;
	f[j+17*pitch*YDIM*ZDIM]= feq17;
	f[j+18*pitch*YDIM*ZDIM]= feq18;
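	// the last thread in x also zeroes the pitch-padding columns so that stray reads
	// from the padded region return 0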
if(x == XDIM-1){
for(int i = XDIM; i<pitch; i++){
j = i+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
f[j+0 *pitch*YDIM*ZDIM]=0.f;
f[j+1 *pitch*YDIM*ZDIM]=0.f;
f[j+2 *pitch*YDIM*ZDIM]=0.f;
f[j+3 *pitch*YDIM*ZDIM]=0.f;
f[j+4 *pitch*YDIM*ZDIM]=0.f;
f[j+5 *pitch*YDIM*ZDIM]=0.f;
f[j+6 *pitch*YDIM*ZDIM]=0.f;
f[j+7 *pitch*YDIM*ZDIM]=0.f;
f[j+8 *pitch*YDIM*ZDIM]=0.f;
f[j+9 *pitch*YDIM*ZDIM]=0.f;
f[j+10*pitch*YDIM*ZDIM]=0.f;
f[j+11*pitch*YDIM*ZDIM]=0.f;
f[j+12*pitch*YDIM*ZDIM]=0.f;
f[j+13*pitch*YDIM*ZDIM]=0.f;
f[j+14*pitch*YDIM*ZDIM]=0.f;
f[j+15*pitch*YDIM*ZDIM]=0.f;
f[j+16*pitch*YDIM*ZDIM]=0.f;
f[j+17*pitch*YDIM*ZDIM]=0.f;
f[j+18*pitch*YDIM*ZDIM]=0.f;
}
}
}
__global__ void initialize_LR(float *f, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
// f[j+0 *pitch*YLRDIM*ZLRDIM]= 1.0f/3.0f*(rho-1.5f*usqr);
// f[j+1 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// f[j+2 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// f[j+3 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// f[j+4 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f[j+5 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// f[j+6 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// f[j+7 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f[j+8 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// f[j+9 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// f[j+10*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
// f[j+11*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr);
// f[j+12*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// f[j+13*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr);
// f[j+14*pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// f[j+15*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
// f[j+16*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
// f[j+17*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// f[j+18*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
float feq0 = 0.1904761791f*rho;
float feq1 = 0.1031746045f*rho;
float feq2 = 0.1031746045f*rho;
float feq3 = 0.1031746045f*rho;
float feq4 = 0.1031746045f*rho;
float feq5 = 0.0158730149f*rho;
float feq6 = 0.0158730149f*rho;
float feq7 = 0.0158730149f*rho;
float feq8 = 0.0158730149f*rho;
float feq9 = 0.1031746045f*rho;
float feq10= 0.0158730149f*rho;
float feq11= 0.0158730149f*rho;
float feq12= 0.0158730149f*rho;
float feq13= 0.0158730149f*rho;
float feq14= 0.1031746045f*rho;
float feq15= 0.0158730149f*rho;
float feq16= 0.0158730149f*rho;
float feq17= 0.0158730149f*rho;
float feq18= 0.0158730149f*rho;
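	// the feq* values computed above are left unused: every distribution on the
	// refined (LR) grid is deliberately set to the constant 0.1f below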
f[j+0 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq0 ;
f[j+1 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq1 ;
f[j+2 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq2 ;
f[j+3 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq3 ;
f[j+4 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq4 ;
f[j+5 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq5 ;
f[j+6 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq6 ;
f[j+7 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq7 ;
f[j+8 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq8 ;
f[j+9 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq9 ;
f[j+10*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq10;
f[j+11*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq11;
f[j+12*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq12;
f[j+13*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq13;
f[j+14*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq14;
f[j+15*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq15;
f[j+16*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq16;
f[j+17*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq17;
f[j+18*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq18;
}
__global__ void initialize(float* f0, float* f1, float* f2,
float* f3, float* f4, float* f5,
float* f6, float* f7, float* f8, float* f9,
float* f10, float* f11, float* f12,
float* f13, float* f14, float* f15,
float* f16, float* f17, float* f18,
size_t pitch)//pitch in elements
//__global__ void initialize(void** f0in, void** f1in,
// int w, int h, int pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
// int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
float u,v,w,rho,feq,usqr;
rho = 1.0f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
feq = 1.0f/3.0f*(rho-1.5f*usqr);
f0[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f1[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f2[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f3[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f4[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f5[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f6[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f7[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f8[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f9[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f10[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f11[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f12[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f13[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f14[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f15[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f16[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f17[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f18[j] = feq;
}
int main(int argc, char *argv[])
{
//int *image_d, *image_h;
ofstream output;
ofstream output2;
string FileName = CASENAME;
//output.open ("LBM1_out.dat");
output.open ((FileName+".dat").c_str());
output2.open ((FileName+".force").c_str());
size_t memsize, memsize2;
size_t pitch = 0;
size_t pitch2 = 0;
int i, n, nBlocks, nBlocks2, n2;
float omega, CharLength, omega2;
	if(fabs(LRFACTOR-1.f/LRLEVEL)>0.001f){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
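	// relaxation rate from the Reynolds number: nu = UMAX*CharLength/RE, tau = 3*nu + 0.5,
	// omega = 1/tau; omega2 is the corresponding fine-grid rate for a refinement factor of 2
	// (tau_fine = 2*(tau-0.5)+0.5), applied a second time when LRFACTOR == 0.25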
CharLength = OBSTR*2.f;
omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega2-1.0f));
}
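	// SF_cf rescales the non-equilibrium part of the distributions when passing from the
	// coarse to the fine grid at the refinement interface; SF_fc is the inverse (fine to coarse)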
float SF_cf = omega*(1.0f-omega2)/((1.0f-omega)*omega2/LRFACTOR);
float SF_fc = 1.f/SF_cf;
cout<<"omega: "<<omega<<endl;
cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
cout<<"LRblocksize: "<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl;
cout<<"LRgrid: "<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
cout<<"TMAX: "<<TMAX<<endl;
cout<<"Method: "<<METHOD<<endl;
cout<<"Model: "<<MODEL<<endl;
nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*(YDIM/BLOCKSIZEY+YDIM%BLOCKSIZEY)
*(ZDIM/BLOCKSIZEZ+ZDIM%BLOCKSIZEZ);
nBlocks2 = (XLRDIM/BLOCKSIZELRX+XLRDIM%BLOCKSIZELRX)*(YLRDIM/BLOCKSIZELRY+YLRDIM%BLOCKSIZELRY)
*(ZLRDIM/BLOCKSIZELRZ+ZLRDIM%BLOCKSIZELRZ);
int B = BLOCKSIZEX*BLOCKSIZEY*BLOCKSIZEZ;
int B2 = BLOCKSIZELRX*BLOCKSIZELRY*BLOCKSIZELRZ;
n = nBlocks*B;
n2 = nBlocks2*B2;
cout<<"nBlocks:"<<nBlocks<<endl;
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),YDIM/BLOCKSIZEY,ZDIM/BLOCKSIZEZ);
dim3 threads2(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 grid2(XLRDIM/BLOCKSIZELRX,YLRDIM/BLOCKSIZELRY,ZLRDIM/BLOCKSIZELRZ);
memsize = n*sizeof(float);
//memsize_int = n*sizeof(int);
memsize2 = n2*sizeof(float);
//hipExtent extent = make_hipExtent(XDIM*sizeof(float),YDIM,ZDIM);
//image_h = (int *)malloc(memsize_int);
float *fA_h,*fA_d,*fB_d,*fC_h,*fC_d,*fD_d;
float *FX_h,*FY_h,*FZ_h,*FX_d,*FY_d,*FZ_d;
fA_h = (float *)malloc(memsize*19);
fC_h = (float *)malloc(memsize2*19);
FX_h = (float *)malloc(TMAX*sizeof(float));
FY_h = (float *)malloc(TMAX*sizeof(float));
FZ_h = (float *)malloc(TMAX*sizeof(float));
hipMallocPitch((void **) &fA_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19);
hipMallocPitch((void **) &fB_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19);
if(REFINEMENT == "YES"){
hipMallocPitch((void **) &fC_d, &pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM*19);
hipMallocPitch((void **) &fD_d, &pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM*19);
}
hipMalloc((void **) &FX_d, TMAX*sizeof(float));
hipMalloc((void **) &FY_d, TMAX*sizeof(float));
hipMalloc((void **) &FZ_d, TMAX*sizeof(float));
cout<<pitch<<", "<<pitch2<<endl;
size_t pitch_elements = pitch/sizeof(float);
size_t pitch_elements2 = pitch2/sizeof(float);
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
for (i = 0; i < n*19; i++)
{
fA_h[i] = i;
}
for (i = 0; i < n2*19; i++)
{
fC_h[i] = 0;
}
for (i = 0; i < TMAX; i++){
FX_h[i] = 0.f;
FY_h[i] = 0.f;
FZ_h[i] = 0.f;
}
hipMemcpy(FX_d, FX_h, TMAX*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(FY_d, FY_h, TMAX*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(FZ_d, FZ_h, TMAX*sizeof(float), hipMemcpyHostToDevice);
// for (i = 0; i < n; i++)
// {
// int x = i%XDIM;
// int y = (i/XDIM)%YDIM;
// int z = (i/XDIM)/YDIM;
//// image_h[i] = 0;
//// if(x < 1) image_h[i] = 1;//DirichletWest
//// if(x > XDIM-2) image_h[i] = 1;//BB
//// if(y < 1) image_h[i] = 1;//BB
//// if(y > YDIM-2) image_h[i] = 1;//BB
//// if(z < 1) image_h[i] = 1;//DirichletWest
//// if(z > ZDIM-2) image_h[i] = 1;//BB
// }
//hipMemcpy(image_d, image_h, memsize_int, hipMemcpyHostToDevice);
if(true)//texture settings
{
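		// all distribution textures (A/B on the coarse grid, C/D on the refined grid) use
		// unnormalized coordinates with linear filtering; clamp addressing is set explicitly
		// for the A and B textures in the loop further down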
texRef_f0B.normalized = false;
texRef_f1B.normalized = false;
texRef_f2B.normalized = false;
texRef_f3B.normalized = false;
texRef_f4B.normalized = false;
texRef_f5B.normalized = false;
texRef_f6B.normalized = false;
texRef_f7B.normalized = false;
texRef_f8B.normalized = false;
texRef_f9B.normalized = false;
texRef_f10B.normalized = false;
texRef_f11B.normalized = false;
texRef_f12B.normalized = false;
texRef_f13B.normalized = false;
texRef_f14B.normalized = false;
texRef_f15B.normalized = false;
texRef_f16B.normalized = false;
texRef_f17B.normalized = false;
texRef_f18B.normalized = false;
texRef_f0B.filterMode = hipFilterModeLinear;
texRef_f1B.filterMode = hipFilterModeLinear;
texRef_f2B.filterMode = hipFilterModeLinear;
texRef_f3B.filterMode = hipFilterModeLinear;
texRef_f4B.filterMode = hipFilterModeLinear;
texRef_f5B.filterMode = hipFilterModeLinear;
texRef_f6B.filterMode = hipFilterModeLinear;
texRef_f7B.filterMode = hipFilterModeLinear;
texRef_f8B.filterMode = hipFilterModeLinear;
texRef_f9B.filterMode = hipFilterModeLinear;
texRef_f10B.filterMode = hipFilterModeLinear;
texRef_f11B.filterMode = hipFilterModeLinear;
texRef_f12B.filterMode = hipFilterModeLinear;
texRef_f13B.filterMode = hipFilterModeLinear;
texRef_f14B.filterMode = hipFilterModeLinear;
texRef_f15B.filterMode = hipFilterModeLinear;
texRef_f16B.filterMode = hipFilterModeLinear;
texRef_f17B.filterMode = hipFilterModeLinear;
texRef_f18B.filterMode = hipFilterModeLinear;
texRef_f0A.normalized = false;
texRef_f1A.normalized = false;
texRef_f2A.normalized = false;
texRef_f3A.normalized = false;
texRef_f4A.normalized = false;
texRef_f5A.normalized = false;
texRef_f6A.normalized = false;
texRef_f7A.normalized = false;
texRef_f8A.normalized = false;
texRef_f9A.normalized = false;
texRef_f10A.normalized = false;
texRef_f11A.normalized = false;
texRef_f12A.normalized = false;
texRef_f13A.normalized = false;
texRef_f14A.normalized = false;
texRef_f15A.normalized = false;
texRef_f16A.normalized = false;
texRef_f17A.normalized = false;
texRef_f18A.normalized = false;
texRef_f0A.filterMode = hipFilterModeLinear;
texRef_f1A.filterMode = hipFilterModeLinear;
texRef_f2A.filterMode = hipFilterModeLinear;
texRef_f3A.filterMode = hipFilterModeLinear;
texRef_f4A.filterMode = hipFilterModeLinear;
texRef_f5A.filterMode = hipFilterModeLinear;
texRef_f6A.filterMode = hipFilterModeLinear;
texRef_f7A.filterMode = hipFilterModeLinear;
texRef_f8A.filterMode = hipFilterModeLinear;
texRef_f9A.filterMode = hipFilterModeLinear;
texRef_f10A.filterMode = hipFilterModeLinear;
texRef_f11A.filterMode = hipFilterModeLinear;
texRef_f12A.filterMode = hipFilterModeLinear;
texRef_f13A.filterMode = hipFilterModeLinear;
texRef_f14A.filterMode = hipFilterModeLinear;
texRef_f15A.filterMode = hipFilterModeLinear;
texRef_f16A.filterMode = hipFilterModeLinear;
texRef_f17A.filterMode = hipFilterModeLinear;
texRef_f18A.filterMode = hipFilterModeLinear;
// if(REFINEMENT == "YES"){
texRef_f0C.normalized = false;
texRef_f1C.normalized = false;
texRef_f2C.normalized = false;
texRef_f3C.normalized = false;
texRef_f4C.normalized = false;
texRef_f5C.normalized = false;
texRef_f6C.normalized = false;
texRef_f7C.normalized = false;
texRef_f8C.normalized = false;
texRef_f9C.normalized = false;
texRef_f10C.normalized = false;
texRef_f11C.normalized = false;
texRef_f12C.normalized = false;
texRef_f13C.normalized = false;
texRef_f14C.normalized = false;
texRef_f15C.normalized = false;
texRef_f16C.normalized = false;
texRef_f17C.normalized = false;
texRef_f18C.normalized = false;
texRef_f0C.filterMode = hipFilterModeLinear;
texRef_f1C.filterMode = hipFilterModeLinear;
texRef_f2C.filterMode = hipFilterModeLinear;
texRef_f3C.filterMode = hipFilterModeLinear;
texRef_f4C.filterMode = hipFilterModeLinear;
texRef_f5C.filterMode = hipFilterModeLinear;
texRef_f6C.filterMode = hipFilterModeLinear;
texRef_f7C.filterMode = hipFilterModeLinear;
texRef_f8C.filterMode = hipFilterModeLinear;
texRef_f9C.filterMode = hipFilterModeLinear;
texRef_f10C.filterMode = hipFilterModeLinear;
texRef_f11C.filterMode = hipFilterModeLinear;
texRef_f12C.filterMode = hipFilterModeLinear;
texRef_f13C.filterMode = hipFilterModeLinear;
texRef_f14C.filterMode = hipFilterModeLinear;
texRef_f15C.filterMode = hipFilterModeLinear;
texRef_f16C.filterMode = hipFilterModeLinear;
texRef_f17C.filterMode = hipFilterModeLinear;
texRef_f18C.filterMode = hipFilterModeLinear;
texRef_f0D.normalized = false;
texRef_f1D.normalized = false;
texRef_f2D.normalized = false;
texRef_f3D.normalized = false;
texRef_f4D.normalized = false;
texRef_f5D.normalized = false;
texRef_f6D.normalized = false;
texRef_f7D.normalized = false;
texRef_f8D.normalized = false;
texRef_f9D.normalized = false;
texRef_f10D.normalized = false;
texRef_f11D.normalized = false;
texRef_f12D.normalized = false;
texRef_f13D.normalized = false;
texRef_f14D.normalized = false;
texRef_f15D.normalized = false;
texRef_f16D.normalized = false;
texRef_f17D.normalized = false;
texRef_f18D.normalized = false;
texRef_f0D.filterMode = hipFilterModeLinear;
texRef_f1D.filterMode = hipFilterModeLinear;
texRef_f2D.filterMode = hipFilterModeLinear;
texRef_f3D.filterMode = hipFilterModeLinear;
texRef_f4D.filterMode = hipFilterModeLinear;
texRef_f5D.filterMode = hipFilterModeLinear;
texRef_f6D.filterMode = hipFilterModeLinear;
texRef_f7D.filterMode = hipFilterModeLinear;
texRef_f8D.filterMode = hipFilterModeLinear;
texRef_f9D.filterMode = hipFilterModeLinear;
texRef_f10D.filterMode = hipFilterModeLinear;
texRef_f11D.filterMode = hipFilterModeLinear;
texRef_f12D.filterMode = hipFilterModeLinear;
texRef_f13D.filterMode = hipFilterModeLinear;
texRef_f14D.filterMode = hipFilterModeLinear;
texRef_f15D.filterMode = hipFilterModeLinear;
texRef_f16D.filterMode = hipFilterModeLinear;
texRef_f17D.filterMode = hipFilterModeLinear;
texRef_f18D.filterMode = hipFilterModeLinear;
// }
for(int i = 0; i<2; i++){
texRef_f0A.addressMode[i] = hipAddressModeClamp;
texRef_f1A.addressMode[i] = hipAddressModeClamp;
texRef_f2A.addressMode[i] = hipAddressModeClamp;
texRef_f3A.addressMode[i] = hipAddressModeClamp;
texRef_f4A.addressMode[i] = hipAddressModeClamp;
texRef_f5A.addressMode[i] = hipAddressModeClamp;
texRef_f6A.addressMode[i] = hipAddressModeClamp;
texRef_f7A.addressMode[i] = hipAddressModeClamp;
texRef_f8A.addressMode[i] = hipAddressModeClamp;
texRef_f9A.addressMode[i] = hipAddressModeClamp;
texRef_f10A.addressMode[i] = hipAddressModeClamp;
texRef_f11A.addressMode[i] = hipAddressModeClamp;
texRef_f12A.addressMode[i] = hipAddressModeClamp;
texRef_f13A.addressMode[i] = hipAddressModeClamp;
texRef_f14A.addressMode[i] = hipAddressModeClamp;
texRef_f15A.addressMode[i] = hipAddressModeClamp;
texRef_f16A.addressMode[i] = hipAddressModeClamp;
texRef_f17A.addressMode[i] = hipAddressModeClamp;
texRef_f18A.addressMode[i] = hipAddressModeClamp;
texRef_f0B.addressMode[i] = hipAddressModeClamp;
texRef_f1B.addressMode[i] = hipAddressModeClamp;
texRef_f2B.addressMode[i] = hipAddressModeClamp;
texRef_f3B.addressMode[i] = hipAddressModeClamp;
texRef_f4B.addressMode[i] = hipAddressModeClamp;
texRef_f5B.addressMode[i] = hipAddressModeClamp;
texRef_f6B.addressMode[i] = hipAddressModeClamp;
texRef_f7B.addressMode[i] = hipAddressModeClamp;
texRef_f8B.addressMode[i] = hipAddressModeClamp;
texRef_f9B.addressMode[i] = hipAddressModeClamp;
texRef_f10B.addressMode[i] = hipAddressModeClamp;
texRef_f11B.addressMode[i] = hipAddressModeClamp;
texRef_f12B.addressMode[i] = hipAddressModeClamp;
texRef_f13B.addressMode[i] = hipAddressModeClamp;
texRef_f14B.addressMode[i] = hipAddressModeClamp;
texRef_f15B.addressMode[i] = hipAddressModeClamp;
texRef_f16B.addressMode[i] = hipAddressModeClamp;
texRef_f17B.addressMode[i] = hipAddressModeClamp;
texRef_f18B.addressMode[i] = hipAddressModeClamp;
}
}
hipMemcpy2D(fA_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyHostToDevice);
hipMemcpy2D(fB_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyHostToDevice);
if(REFINEMENT == "YES"){
hipMemcpy2D(fC_d,pitch2,fC_h,XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,hipMemcpyHostToDevice);
hipMemcpy2D(fD_d,pitch2,fC_h,XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,hipMemcpyHostToDevice);
}
// for (i = 0; i < n*19; i++)
// {
// fA_h[i] = 0;
// fC_h[i] = 1;
// }
if(true)//bind texture
{
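		// bind each of the 19 pitched distribution planes as its own 2D texture:
		// width XDIM and height YDIM*ZDIM for the coarse A/B arrays, XLRDIM and
		// YLRDIM*ZLRDIM for the refined C/D arrays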
hipBindTexture2D(0,&texRef_f0A, fA_d ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f1A, fA_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f2A, fA_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f3A, fA_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f4A, fA_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f5A, fA_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f6A, fA_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f7A, fA_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f8A, fA_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f9A, fA_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f10A,fA_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f11A,fA_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f12A,fA_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f13A,fA_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f14A,fA_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f15A,fA_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f16A,fA_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f17A,fA_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f18A,fA_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f0B, fB_d ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f1B, fB_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f2B, fB_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f3B, fB_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f4B, fB_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f5B, fB_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f6B, fB_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f7B, fB_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f8B, fB_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f9B, fB_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f10B,fB_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f11B,fB_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f12B,fB_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f13B,fB_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f14B,fB_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f15B,fB_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f16B,fB_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f17B,fB_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch);
hipBindTexture2D(0,&texRef_f18B,fB_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch);
// if(REFINEMENT == "YES"){
hipBindTexture2D(0,&texRef_f0C, fC_d ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f1C, fC_d+pitch_elements2*YLRDIM*ZLRDIM ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f2C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*2 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f3C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*3 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f4C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*4 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f5C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*5 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f6C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*6 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f7C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*7 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f8C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*8 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f9C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*9 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f10C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*10,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f11C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*11,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f12C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*12,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f13C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*13,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f14C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*14,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f15C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*15,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f16C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*16,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f17C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*17,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f18C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*18,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f0D, fD_d ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f1D, fD_d+pitch_elements2*YLRDIM*ZLRDIM ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f2D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*2 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f3D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*3 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f4D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*4 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f5D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*5 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f6D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*6 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f7D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*7 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f8D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*8 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f9D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*9 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f10D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*10,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f11D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*11,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f12D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*12,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f13D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*13,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f14D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*14,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f15D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*15,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f16D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*16,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f17D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*17,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
hipBindTexture2D(0,&texRef_f18D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*18,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
// }
}
hipLaunchKernelGGL(( initialize_single), dim3(grid), dim3(threads), 0, 0, fA_d,pitch_elements);
hipLaunchKernelGGL(( initialize_single), dim3(grid), dim3(threads), 0, 0, fB_d,pitch_elements);
if(REFINEMENT == "YES"){
hipLaunchKernelGGL(( initialize_LR), dim3(grid2), dim3(threads2), 0, 0, fC_d,pitch_elements2);
hipLaunchKernelGGL(( initialize_LR), dim3(grid2), dim3(threads2), 0, 0, fD_d,pitch_elements2);
}
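	// prefer L1 cache over shared memory for the streaming/collision kernels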
hipFuncSetCacheConfig(mrt_d_single,hipFuncCachePreferL1);
hipFuncSetCacheConfig(mrt_d_hybAB,hipFuncCachePreferL1);
hipFuncSetCacheConfig(mrt_d_hybAB_force,hipFuncCachePreferL1);
hipFuncSetCacheConfig(LR_d_hybABCD_force,hipFuncCachePreferL1);
hipFuncSetCacheConfig(LR_d_hybABCD,hipFuncCachePreferL1);
hipFuncSetCacheConfig(LR_d_hybABCD2,hipFuncCachePreferL1);
hipFuncSetCacheConfig(LR_d_hybABDC2,hipFuncCachePreferL1);
hipFuncSetCacheConfig(LR_d_hybABDC_Interp,hipFuncCachePreferL1);
hipFuncSetCacheConfig(mrt_d_hybBA,hipFuncCachePreferL1);
hipFuncSetCacheConfig(mrt_d_hybBA_force,hipFuncCachePreferL1);
// hipFuncSetCacheConfig(LR_d_hybBACD,hipFuncCachePreferL1);
// hipFuncSetCacheConfig(LR_d_hybBACD2,hipFuncCachePreferL1);
hipFuncSetCacheConfig(LR_d_hybBADC2,hipFuncCachePreferL1);
hipFuncSetCacheConfig(LR_d_hybBADC_Interp,hipFuncCachePreferL1);
hipFuncSetCacheConfig(ExtractFromC_d,hipFuncCachePreferL1);
hipFuncSetCacheConfig(simple_copy,hipFuncCachePreferL1);
struct timeval tdr0,tdr1;
double restime;
hipDeviceSynchronize();
gettimeofday (&tdr0,NULL);
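	// main time loop: two lattice time steps per iteration, ping-ponging between the
	// A and B arrays (and between C and D on the refined patch)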
for(int t = 0; t<TMAX; t=t+2){
if(METHOD == "SINGLE"){
hipLaunchKernelGGL(( mrt_d_single), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements);
if(REFINEMENT == "YES"){
hipLaunchKernelGGL(( LR_d_ABCD), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2);
hipLaunchKernelGGL(( LR_d_ABDC), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf);
hipLaunchKernelGGL(( ExtractFromC_d), dim3(grid), dim3(threads), 0, 0, fB_d,pitch_elements,SF_fc);
}
hipLaunchKernelGGL(( mrt_d_single), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements);
if(REFINEMENT == "YES"){
hipLaunchKernelGGL(( LR_d_BACD), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2);
hipLaunchKernelGGL(( LR_d_BADC), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf);
hipLaunchKernelGGL(( ExtractFromC_d), dim3(grid), dim3(threads), 0, 0, fA_d,pitch_elements,SF_fc);
}
}
else if(METHOD == "HYB"){
if(t >= STARTF && REFINEMENT == "NO")
hipLaunchKernelGGL(( mrt_d_hybAB_force), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t);
else
hipLaunchKernelGGL(( mrt_d_hybAB), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements);
if(REFINEMENT == "YES"){
if(LRFACTOR == 0.5f)
{
if(t >= STARTF)
hipLaunchKernelGGL(( LR_d_hybABCD_force), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t);
else
hipLaunchKernelGGL(( LR_d_hybABCD), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2);
// hipLaunchKernelGGL(( LR_d_hybABDC_Interp), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf);
}
else if(LRFACTOR == 0.25f)
{
if(t >= STARTF)
hipLaunchKernelGGL(( LR_d_hybABCD_force), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t);
else
hipLaunchKernelGGL(( LR_d_hybABCD), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2);
hipLaunchKernelGGL(( LR_d_hybABDC2), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf,2);
hipLaunchKernelGGL(( LR_d_hybABCD2), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,3);
hipLaunchKernelGGL(( LR_d_hybABDC_Interp), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf);
}
hipLaunchKernelGGL(( ExtractFromC_d), dim3(grid), dim3(threads), 0, 0, fB_d,pitch_elements,SF_fc);
}
if(t >= STARTF && REFINEMENT == "NO")
hipLaunchKernelGGL(( mrt_d_hybBA_force), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t+1);
else
hipLaunchKernelGGL(( mrt_d_hybBA), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements);
if(REFINEMENT == "YES"){
if(LRFACTOR == 0.5f)
{
// if(t >= STARTF)
// hipLaunchKernelGGL(( LR_d_hybABCD_force), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1);
// else
// hipLaunchKernelGGL(( LR_d_hybABCD), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2);
//
// hipLaunchKernelGGL(( LR_d_hybBADC_Interp), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf);
}
else if(LRFACTOR == 0.25f)
{
if(t >= STARTF)
hipLaunchKernelGGL(( LR_d_hybABCD_force), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1);
else
hipLaunchKernelGGL(( LR_d_hybABCD), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2);
hipLaunchKernelGGL(( LR_d_hybBADC2), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf,2);
hipLaunchKernelGGL(( LR_d_hybABCD2), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,3);
hipLaunchKernelGGL(( LR_d_hybBADC_Interp), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf);
}
hipLaunchKernelGGL(( ExtractFromC_d), dim3(grid), dim3(threads), 0, 0, fA_d,pitch_elements,SF_fc);
}
}
// else if(METHOD == "TEXT"){
// mrt_d_textAB<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements);
// mrt_d_textBA<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements);
// }
//
// else if(METHOD == "SHARED"){
// mrt_d_shared<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements);
// mrt_d_shared<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements);
// }
// simple_copy<<<grid, threads>>>(fA_d,fB_d,pitch_elements);
// simple_copy<<<grid, threads>>>(fB_d,fA_d,pitch_elements);
//
// simple_text<<<grid, threads>>>(fA_d,fB_d,pitch_elements);
// simple_text<<<grid, threads>>>(fB_d,fA_d,pitch_elements);
if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n";
}
hipDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
if(REFINEMENT == "YES"){
Nodes = (XDIM*YDIM*ZDIM+XLRDIM*YLRDIM*ZLRDIM*LRLEVEL);
}
else{
Nodes = XDIM*YDIM*ZDIM;
}
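	// MLUPS = million lattice-node updates per second: Nodes*TMAX/1e6 divided by wall-clock time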
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)"<<endl;
// <<double((XDIM*YDIM*ZDIM)*double(TMAX/1000000.f))/restime<<"MLUPS)"<<endl;
cout<<XDIM<<","<<YDIM<<","<<ZDIM<<","<<TMAX<<","<<restime<<endl;
if(true){
hipUnbindTexture(texRef_f0A);
hipUnbindTexture(texRef_f1A);
hipUnbindTexture(texRef_f2A);
hipUnbindTexture(texRef_f3A);
hipUnbindTexture(texRef_f4A);
hipUnbindTexture(texRef_f5A);
hipUnbindTexture(texRef_f6A);
hipUnbindTexture(texRef_f7A);
hipUnbindTexture(texRef_f8A);
hipUnbindTexture(texRef_f9A);
hipUnbindTexture(texRef_f10A);
hipUnbindTexture(texRef_f11A);
hipUnbindTexture(texRef_f12A);
hipUnbindTexture(texRef_f13A);
hipUnbindTexture(texRef_f14A);
hipUnbindTexture(texRef_f15A);
hipUnbindTexture(texRef_f16A);
hipUnbindTexture(texRef_f17A);
hipUnbindTexture(texRef_f18A);
hipUnbindTexture(texRef_f0B);
hipUnbindTexture(texRef_f1B);
hipUnbindTexture(texRef_f2B);
hipUnbindTexture(texRef_f3B);
hipUnbindTexture(texRef_f4B);
hipUnbindTexture(texRef_f5B);
hipUnbindTexture(texRef_f6B);
hipUnbindTexture(texRef_f7B);
hipUnbindTexture(texRef_f8B);
hipUnbindTexture(texRef_f9B);
hipUnbindTexture(texRef_f10B);
hipUnbindTexture(texRef_f11B);
hipUnbindTexture(texRef_f12B);
hipUnbindTexture(texRef_f13B);
hipUnbindTexture(texRef_f14B);
hipUnbindTexture(texRef_f15B);
hipUnbindTexture(texRef_f16B);
hipUnbindTexture(texRef_f17B);
hipUnbindTexture(texRef_f18B);
hipUnbindTexture(texRef_f0C);
hipUnbindTexture(texRef_f1C);
hipUnbindTexture(texRef_f2C);
hipUnbindTexture(texRef_f3C);
hipUnbindTexture(texRef_f4C);
hipUnbindTexture(texRef_f5C);
hipUnbindTexture(texRef_f6C);
hipUnbindTexture(texRef_f7C);
hipUnbindTexture(texRef_f8C);
hipUnbindTexture(texRef_f9C);
hipUnbindTexture(texRef_f10C);
hipUnbindTexture(texRef_f11C);
hipUnbindTexture(texRef_f12C);
hipUnbindTexture(texRef_f13C);
hipUnbindTexture(texRef_f14C);
hipUnbindTexture(texRef_f15C);
hipUnbindTexture(texRef_f16C);
hipUnbindTexture(texRef_f17C);
hipUnbindTexture(texRef_f18C);
hipUnbindTexture(texRef_f0D);
hipUnbindTexture(texRef_f1D);
hipUnbindTexture(texRef_f2D);
hipUnbindTexture(texRef_f3D);
hipUnbindTexture(texRef_f4D);
hipUnbindTexture(texRef_f5D);
hipUnbindTexture(texRef_f6D);
hipUnbindTexture(texRef_f7D);
hipUnbindTexture(texRef_f8D);
hipUnbindTexture(texRef_f9D);
hipUnbindTexture(texRef_f10D);
hipUnbindTexture(texRef_f11D);
hipUnbindTexture(texRef_f12D);
hipUnbindTexture(texRef_f13D);
hipUnbindTexture(texRef_f14D);
hipUnbindTexture(texRef_f15D);
hipUnbindTexture(texRef_f16D);
hipUnbindTexture(texRef_f17D);
hipUnbindTexture(texRef_f18D);
}
hipMemcpy2D(fA_h,XDIM*sizeof(float),fA_d,pitch,XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyDeviceToHost);
if(REFINEMENT == "YES"){
hipMemcpy2D(fC_h,XLRDIM*sizeof(float),fD_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,hipMemcpyDeviceToHost);
}
hipMemcpy(FX_h, FX_d, TMAX*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(FY_h, FY_d, TMAX*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(FZ_h, FZ_d, TMAX*sizeof(float), hipMemcpyDeviceToHost);
//hipMemcpy(image_h, image_d, memsize_int, hipMemcpyDeviceToHost);
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n";
int row = 0;
int col = 0;
int dep = 0;
i = 0;
float rho, u, v, w;
int j;
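	// recover the macroscopic fields from the distributions: rho = sum_i f_i, and the
	// velocity components from the first-order moments over the corresponding directions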
for(dep = 0; dep<ZDIM; dep++){
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = dep*XDIM*YDIM+row*XDIM+col;
// rho = 0;
rho = fA_h[i];
for(j = 1; j<19; j++)
rho+=fA_h[i+XDIM*YDIM*ZDIM*j];
// rho = f0_h[i]+f1_h[i]+f2_h[i]+f3_h[i]+f4_h[i]+f5_h[i]+f6_h[i]+f7_h[i]+f8_h[i]+f9_h[i]+
// f10_h[i]+f11_h[i]+f12_h[i]+f13_h[i]+f14_h[i]+f15_h[i]+f16_h[i]+f17_h[i]+f18_h[i];
u = fA_h[i+XDIM*YDIM*ZDIM*1]-fA_h[i+XDIM*YDIM*ZDIM*3]+fA_h[i+XDIM*YDIM*ZDIM*5]-fA_h[i+XDIM*YDIM*ZDIM*6]-
fA_h[i+XDIM*YDIM*ZDIM*7]+fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*10]-fA_h[i+XDIM*YDIM*ZDIM*12]
+fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*17];
			v = fA_h[i+XDIM*YDIM*ZDIM*2]-fA_h[i+XDIM*YDIM*ZDIM*4]+fA_h[i+XDIM*YDIM*ZDIM*5]+fA_h[i+XDIM*YDIM*ZDIM*6]-
			    fA_h[i+XDIM*YDIM*ZDIM*7]-fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*11]-fA_h[i+XDIM*YDIM*ZDIM*13]
			   +fA_h[i+XDIM*YDIM*ZDIM*16]-fA_h[i+XDIM*YDIM*ZDIM*18];
			w = fA_h[i+XDIM*YDIM*ZDIM*9]+fA_h[i+XDIM*YDIM*ZDIM*10]+fA_h[i+XDIM*YDIM*ZDIM*11]+fA_h[i+XDIM*YDIM*ZDIM*12]+
			    fA_h[i+XDIM*YDIM*ZDIM*13]-fA_h[i+XDIM*YDIM*ZDIM*14]-fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*16]
			   -fA_h[i+XDIM*YDIM*ZDIM*17]-fA_h[i+XDIM*YDIM*ZDIM*18];
output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl;
// output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<fA_h[i+XDIM*YDIM*ZDIM*1]<<","<<rho<<endl;
}
}
}
if(REFINEMENT == "YES"){
output<<endl<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM-0<<", J="<<YLRDIM-0<<", K="<<ZLRDIM-0<<"\n";
for(dep = 0; dep<ZLRDIM-0; dep++){
for(row = 0; row<YLRDIM-0; row++){
for(col = 0; col<XLRDIM-0; col++){
i = dep*XLRDIM*YLRDIM+row*XLRDIM+col;
rho = fC_h[i];
for(j = 1; j<19; j++)
rho+=fC_h[i+XLRDIM*YLRDIM*ZLRDIM*j];
u = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*1]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*3]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*5]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*6]-
fC_h[i+XLRDIM*YLRDIM*ZLRDIM*7]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*8]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*10]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*12]
+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*15]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*17];
			v = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*2]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*4]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*5]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*6]-
			    fC_h[i+XLRDIM*YLRDIM*ZLRDIM*7]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*8]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*11]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*13]
			   +fC_h[i+XLRDIM*YLRDIM*ZLRDIM*16]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*18];
			w = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*9]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*10]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*11]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*12]+
			    fC_h[i+XLRDIM*YLRDIM*ZLRDIM*13]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*14]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*15]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*16]
			   -fC_h[i+XLRDIM*YLRDIM*ZLRDIM*17]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*18];
// u=2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*1]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*3 ]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*5 ]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*8 ] ;
// v=2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*2]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*4 ]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*5 ]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*8 ] ;
// w=2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*9]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*10]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*11]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*12] ;
//
// u=-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*6 ]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*7 ]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*10]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*12]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*15]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*17];
// v=+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*6 ]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*7 ]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*11]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*13]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*16]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*18];
// w=+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*13]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*14]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*15]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*16]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*17]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*18];
// output<<LRX0+col*LRFACTOR<<", "<<LRY0+row*LRFACTOR<<", "<<LRZ0+dep*LRFACTOR<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl;
output<<LRX0+col*LRFACTOR<<", "<<LRY0+row*LRFACTOR<<", "<<LRZ0+dep*LRFACTOR<<", "<<fC_h[i+XLRDIM*YLRDIM*ZLRDIM*1]<<","<<fC_h[i+XLRDIM*YLRDIM*ZLRDIM*2]<<","<<fC_h[i+XLRDIM*YLRDIM*ZLRDIM*3]<<","<<rho<<endl;
}
}
}
}
output.close();
//for(int t = STARTF-1; t<TMAX; t++){
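	// force history normalized by 0.5*UMAX^2*(pi*OBSTR^2), i.e. drag/lift coefficient scaling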
for(int t = 0; t<TMAX; t++){
output2<<t<<", "<<FX_h[t]/(0.5f*UMAX*UMAX*OBSTR*OBSTR*3.14158f)<<", "
<<FY_h[t]/(0.5f*UMAX*UMAX*OBSTR*OBSTR*3.14158f)<<", "
<<FZ_h[t]/(0.5f*UMAX*UMAX*OBSTR*OBSTR*3.14158f)<<endl;
}
output2.close();
//hipFree(image_d);
hipFree(fA_d);
hipFree(fB_d);
hipFree(fC_d);
hipFree(fD_d);
return(0);
}
|
ba6d5d2aa0e6ebf1e7457e5eb970928adb158877.cu
|
#include <cuda.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
#define CASENAME "test"
#define BLOCKSIZEX 64
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define XDIM 128
#define YDIM 32
#define ZDIM 32
#define TMAX 2
#define STARTF 0
#define OBSTR 1.f
#define OBSTX 64.25f
#define OBSTY 16.25f
#define OBSTZ 16.25f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 31.75f //minimum x coord of LR
#define XLRDIM 128 //number of nodes in x
#define LRY0 7.75f
#define YLRDIM 32
#define LRZ0 7.75f
#define ZLRDIM 32
#define RE 20.f//100.f;
#define UMAX 0.08f
#define METHOD "HYB" //SINGLE,HYB,TEXT,SHARED
#define REFINEMENT "YES" //YES,NO
#define MODEL "MRT" //BGK,MRT,STREAM
//#define CHARLENGTH = XDIM-2.f;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
/*
Image List:
0 fluid
1 BB
2
3 DirichletWest(simple)
10 BB(force)
13 DirichletWest_Reg
14 NeumannEast_Reg
15 DirichletNorth_Reg
16 DirichletSouth_Reg
21 ysymmetry_top
22 ysymmetry_bot
23 zsymmetry_top
24 zsymmetry_bot
*/
inline __device__ int ImageFcn(float x, float y, float z){
//if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY)*(y-OBSTY))<OBSTR*OBSTR)
if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY)*(y-OBSTY)+(z-OBSTZ)*(z-OBSTZ))<OBSTR*OBSTR)
{
return 10;
}
else
return 10;
//if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f)
// if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f)
// return 1;
// else if(x < 17.5f)
// return 13;
// else if(x > 78.5f)
// return 14;
// else
}
inline __device__ int ImageFcn(int x, int y, int z){
//Cylinder
// if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY)*(y-OBSTY))<OBSTR*OBSTR)
//Sphere
if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY)*(y-OBSTY)+(z-OBSTZ)*(z-OBSTZ))<OBSTR*OBSTR)
{
// if(z == 0 || z == ZDIM-1)
// return 1;
// else
return 10;
}
else if(x == 0)
return 1;//3;
else if(x == XDIM-1)
return 1;//4;
else if(y == 0)
return 1;//22;//22;
else if(y == YDIM-1)
return 1;//21;
else if(z == 0)
return 1;//24;
else if(z == ZDIM-1)
return 1;//23;
else
return 0;
//Lid Driven Cavity
// if(x == 0)
// return 3;
// else if(x == XDIM-1 || y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// return 1;
// else
// return 0;
}
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// return 1.f;
}
__device__ void DirichletWest(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = 0.0f;//UMAX;//*PoisProf(zcoord)*1.5;
v = UMAX;//0.0;
w = 0.0f;
//rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);;
f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
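// DirichletWest imposes a prescribed velocity (u,v,w) = (0,UMAX,0) on the x = 0 face.
// Edge and corner populations are first filled by mirroring from known neighbors, then
// each unknown incoming distribution is set from its opposite plus the momentum
// correction w_i*6*(e_i . u), i.e. a velocity bounce-back style inlet. The fma() form
// is simply f_i = w_i*6*(e_i . u) + f_opposite with w_i = 1/18 or 1/36.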
__device__ void DirichletWest_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(y)*1.5;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f1 = f3+0.33333333f*u;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f5 = f7+0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f10= f17+0.166666667f*(u+w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f15= f12+0.166666667f*(u-w);
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void NeumannEast_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f3 = f1 -0.333333333f*u;
f7 = f5 -0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f17= f10-0.166666667f*(u+w);
f12= f15-0.166666667f*(u-w);
// f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+
// (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2));
// f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+
// (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v));
// f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+
// (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v));
// f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+
// (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w));
// f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+
// (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w));
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
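// NeumannEast_Reg acts as a pressure outlet on the x = XDIM-1 face: the density is
// pinned to rho = 1 and the outgoing normal velocity u is recovered from the known
// populations, u = -rho + (resting/tangential f's + 2*(f's streaming toward the wall)).
// The unknown incoming distributions are then set from their opposites with the
// first-order momentum correction, mirroring the west inlet above.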
__device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f4 = f2-0.33333333f*v;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f7 = f5-0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f13= f16-0.166666667f*(v-w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f18= f11-0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f2 = f4 +0.33333333f*v;
f5 = f7 +0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f16= f13+0.166666667f*(v-w);
f11= f18+0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void ysymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
__device__ void ysymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
__device__ void zsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
__device__ void zsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
__device__ void boundaries(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 13)//DirichletWest
{
DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 14)//DirichletWest
{
NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
}
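// boundaries() is the per-node dispatcher: it switches on the image code im (see the
// Image List near the top of this file) and applies the matching boundary routine.
// Codes 3 (simple DirichletWest), 15 and 16 (Dirichlet north/south) are present in the
// source but commented out here.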
texture<float,2,cudaReadModeElementType> texRef_f0A;
texture<float,2,cudaReadModeElementType> texRef_f1A;
texture<float,2,cudaReadModeElementType> texRef_f2A;
texture<float,2,cudaReadModeElementType> texRef_f3A;
texture<float,2,cudaReadModeElementType> texRef_f4A;
texture<float,2,cudaReadModeElementType> texRef_f5A;
texture<float,2,cudaReadModeElementType> texRef_f6A;
texture<float,2,cudaReadModeElementType> texRef_f7A;
texture<float,2,cudaReadModeElementType> texRef_f8A;
texture<float,2,cudaReadModeElementType> texRef_f9A;
texture<float,2,cudaReadModeElementType> texRef_f10A;
texture<float,2,cudaReadModeElementType> texRef_f11A;
texture<float,2,cudaReadModeElementType> texRef_f12A;
texture<float,2,cudaReadModeElementType> texRef_f13A;
texture<float,2,cudaReadModeElementType> texRef_f14A;
texture<float,2,cudaReadModeElementType> texRef_f15A;
texture<float,2,cudaReadModeElementType> texRef_f16A;
texture<float,2,cudaReadModeElementType> texRef_f17A;
texture<float,2,cudaReadModeElementType> texRef_f18A;
texture<float,2,cudaReadModeElementType> texRef_f0B;
texture<float,2,cudaReadModeElementType> texRef_f1B;
texture<float,2,cudaReadModeElementType> texRef_f2B;
texture<float,2,cudaReadModeElementType> texRef_f3B;
texture<float,2,cudaReadModeElementType> texRef_f4B;
texture<float,2,cudaReadModeElementType> texRef_f5B;
texture<float,2,cudaReadModeElementType> texRef_f6B;
texture<float,2,cudaReadModeElementType> texRef_f7B;
texture<float,2,cudaReadModeElementType> texRef_f8B;
texture<float,2,cudaReadModeElementType> texRef_f9B;
texture<float,2,cudaReadModeElementType> texRef_f10B;
texture<float,2,cudaReadModeElementType> texRef_f11B;
texture<float,2,cudaReadModeElementType> texRef_f12B;
texture<float,2,cudaReadModeElementType> texRef_f13B;
texture<float,2,cudaReadModeElementType> texRef_f14B;
texture<float,2,cudaReadModeElementType> texRef_f15B;
texture<float,2,cudaReadModeElementType> texRef_f16B;
texture<float,2,cudaReadModeElementType> texRef_f17B;
texture<float,2,cudaReadModeElementType> texRef_f18B;
texture<float,2,cudaReadModeElementType> texRef_f0C;
texture<float,2,cudaReadModeElementType> texRef_f1C;
texture<float,2,cudaReadModeElementType> texRef_f2C;
texture<float,2,cudaReadModeElementType> texRef_f3C;
texture<float,2,cudaReadModeElementType> texRef_f4C;
texture<float,2,cudaReadModeElementType> texRef_f5C;
texture<float,2,cudaReadModeElementType> texRef_f6C;
texture<float,2,cudaReadModeElementType> texRef_f7C;
texture<float,2,cudaReadModeElementType> texRef_f8C;
texture<float,2,cudaReadModeElementType> texRef_f9C;
texture<float,2,cudaReadModeElementType> texRef_f10C;
texture<float,2,cudaReadModeElementType> texRef_f11C;
texture<float,2,cudaReadModeElementType> texRef_f12C;
texture<float,2,cudaReadModeElementType> texRef_f13C;
texture<float,2,cudaReadModeElementType> texRef_f14C;
texture<float,2,cudaReadModeElementType> texRef_f15C;
texture<float,2,cudaReadModeElementType> texRef_f16C;
texture<float,2,cudaReadModeElementType> texRef_f17C;
texture<float,2,cudaReadModeElementType> texRef_f18C;
texture<float,2,cudaReadModeElementType> texRef_f0D;
texture<float,2,cudaReadModeElementType> texRef_f1D;
texture<float,2,cudaReadModeElementType> texRef_f2D;
texture<float,2,cudaReadModeElementType> texRef_f3D;
texture<float,2,cudaReadModeElementType> texRef_f4D;
texture<float,2,cudaReadModeElementType> texRef_f5D;
texture<float,2,cudaReadModeElementType> texRef_f6D;
texture<float,2,cudaReadModeElementType> texRef_f7D;
texture<float,2,cudaReadModeElementType> texRef_f8D;
texture<float,2,cudaReadModeElementType> texRef_f9D;
texture<float,2,cudaReadModeElementType> texRef_f10D;
texture<float,2,cudaReadModeElementType> texRef_f11D;
texture<float,2,cudaReadModeElementType> texRef_f12D;
texture<float,2,cudaReadModeElementType> texRef_f13D;
texture<float,2,cudaReadModeElementType> texRef_f14D;
texture<float,2,cudaReadModeElementType> texRef_f15D;
texture<float,2,cudaReadModeElementType> texRef_f16D;
texture<float,2,cudaReadModeElementType> texRef_f17D;
texture<float,2,cudaReadModeElementType> texRef_f18D;
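// 2D texture references for the 19 distribution functions. The A/B pair presumably
// ping-pongs the coarse-grid data between time steps, and the C/D pair plays the same
// role for the refined (LR) grid; ExtractFromC_d below reads the *C textures using
// YLRDIM-sized coordinates. Each 3D field is flattened into a 2D texture of width
// pitch and height YDIM*ZDIM (or YLRDIM*ZLRDIM), with z folded into the y coordinate.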
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
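// timeval_subtract stores (x - y) in seconds as a double in *result and returns 1 if
// the difference would be negative. Typical usage (sketch):
//   struct timeval start, end; double elapsed;
//   gettimeofday(&start, NULL);
//   /* ... launch kernels, synchronize ... */
//   gettimeofday(&end, NULL);
//   timeval_subtract(&elapsed, &end, &start);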
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
f0 = f0 -omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
f1 = f1 -omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 = f2 -omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
f3 = f3 -omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
f4 = f4 -omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
f5 = f5 -omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
f6 = f6 -omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
f7 = f7 -omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
f8 = f8 -omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
f9 = f9 -omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
f10= f10-omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
	f11= f11-omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
f12= f12-omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
	f13= f13-omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
f14= f14-omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
f15= f15-omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
f16= f16-omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
f17= f17-omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
f18= f18-omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
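// bgk_collide relaxes each distribution toward the D3Q19 equilibrium (written in the
// linear-in-rho, incompressible-style form) with a single rate omega:
//   f_i <- f_i - omega*(f_i - feq_i),
//   feq_i = w_i*(rho + 3*(e_i.u) + 4.5*(e_i.u)^2 - 1.5*|u|^2),
// with weights w_i = 1/3 (rest), 1/18 (face neighbors), 1/36 (edge neighbors).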
inline __device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
//f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
//f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
//f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
//f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
//f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
//f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
//f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
//f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
//f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
//f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
//f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
//f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
//f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
//f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
//f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
//f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
//f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
//f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
//f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
//f0 -= - 0.012531328f*(m1)+ 0.047619048f*(m2);
//f1 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
//f2 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f3 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
//f4 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f9 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f10 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f11 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//f12 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f13 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//f14 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f15 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f16 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//f17 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f18 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//
//f2 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12);
//f4 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12);
//f5 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega;
//f6 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega;
//f7 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega;
//f8 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega;
//f9 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12);
//f10 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ;
//f11 -= +( 0.25f*(m14) )*omega ;
//f12 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ;
//f13 -= +( -0.25f*(m14) )*omega ;
//f14 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12);
//f15 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ;
//f16 -= +( -0.25f*(m14) )*omega ;
//f17 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ;
//f18 -= +( 0.25f*(m14) )*omega ;
//
//f5 -= 0.125f*(m16)+ -0.125f*(m17);
//f6 -= -0.125f*(m16)+ -0.125f*(m17);
//f7 -= -0.125f*(m16)+ 0.125f*(m17);
//f8 -= 0.125f*(m16)+ 0.125f*(m17);
//f10 -= -0.125f*(m16) + 0.125f*(m18);
//f11 -= + 0.125f*(m17)+ -0.125f*(m18);
//f12 -= 0.125f*(m16) + 0.125f*(m18);
//f13 -= + -0.125f*(m17)+ -0.125f*(m18);
//f15 -= -0.125f*(m16) + -0.125f*(m18);
//f16 -= + 0.125f*(m17)+ 0.125f*(m18);
//f17 -= 0.125f*(m16) + -0.125f*(m18);
//f18 -= + -0.125f*(m17)+ 0.125f*(m18);
}
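// mrt_collide computes the velocity and the non-conserved moments m1..m18 (already
// written as m - m_eq), but every line that would relax the moments and map them back
// onto the distributions is commented out, so as written this routine leaves f0..f18
// unchanged.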
//{
// float u,v,w;
//// float rho = f1+f2+f4+f6+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
//// u = u*rho;
//// v = v*rho;
//// w = w*rho;
//
// float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//
//
// //COMPUTE M-MEQ
// m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
// m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
// m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
// m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
// m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18 -(2.f*u*u-(v*v+w*w));
// m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
// m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 -(v*v-w*w);
// m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
// m13 = f5+ - f6+ f7+ - f8 -u*v;
// m14 = f11 + - f13 + - f16 + f18 -v*w;
// m15 = f10 + - f12 + - f15 + f17 -u*w;
// m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
// m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
// m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
//
//
//f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
//f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
//f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
//f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
//f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
//f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
//f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
//f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
//f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
//f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
//f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
//f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
//f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
//f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
//f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
//f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
//f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
//f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
//f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
//}
inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
// u = rho*u;
// v = rho*v;
// w = rho*w;
// float meq0 = rho;
// float meq1 = -11.f*rho+19.f*(u*u+v*v+w*w);
// float meq2 = -7.53968254f*(u*u+v*v+w*w);
// float meq3 = u;
// float meq4 = -0.66666667f*u;//qx_eq
// float meq5 = v;
// float meq6 = -0.66666667f*v;//qx_eq
// float meq7 = w;
// float meq8 = -0.66666667f*w;//qx_eq
// float meq9 = (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
// float meq11= (v*v-w*w);//pww_eq
// float meq13= u*v;//pxy_eq
// float meq14= v*w;//pyz_eq
// float meq15= u*w;//pxz_eq
float usqr = u*u+v*v+w*w;
//float feq0 = 0.1904761791f*rho+ -0.5971277356f*usqr ;
//float feq1 = 0.1031746045f*rho+ 0.0323759168f*usqr+ (0.166666667f*u) + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.0323759168f*usqr + (0.166666667f*v) +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.0323759168f*usqr+ -(0.166666667f*u) + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.0323759168f*usqr + -(0.166666667f*v) +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.0335726887f*usqr+ 0.1f*(u+v)+ 0.0166666667f*(-u-v) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.0335726887f*usqr+ -0.1f*(u-v)+ -0.0166666667f*(-u+v) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.0335726887f*usqr+ -0.1f*(u+v)+ -0.0166666667f*(-u-v) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.0335726887f*usqr+ 0.1f*(u-v)+ 0.0166666667f*(-u+v) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.0323759168f*usqr + (0.166666667f*w)+-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
//float feq10= 0.0158730149f*rho+ 0.0335726887f*usqr+ 0.1f*(u+w)+ 0.0166666667f*(-u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.0335726887f*usqr + 0.1f*(v+w)+ 0.0166666667f*(-v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.0335726887f*usqr+ -0.1f*(u-w)+ -0.0166666667f*(-u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.0335726887f*usqr + -0.1f*(v-w)+ -0.0166666667f*(-v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.0323759168f*usqr + -(0.166666667f*w)+-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
//float feq15= 0.0158730149f*rho+ 0.0335726887f*usqr+ 0.1f*(u-w)+ 0.0166666667f*(-u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.0335726887f*usqr + 0.1f*(v-w)+ 0.0166666667f*(-v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.0335726887f*usqr+ -0.1f*(u+w)+ -0.0166666667f*(-u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.0335726887f*usqr+ + -0.1f*(v+w)+ -0.0166666667f*(-v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq0 = 0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*(-7.53968254*usqr) ;
//float feq1 = 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr)+ 0.1f*(meq3-meq4) ;
//float feq2 = 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr) + 0.1f*(meq5-meq6) ;
//float feq3 = 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr)+ -0.1f*(meq3-meq4) ;
//float feq4 = 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr) + -0.1f*(meq5-meq6) ;
//float feq5 = 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ 0.1f*(meq3+meq5)+ 0.025f*(meq4+meq6) ;
//float feq6 = 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ -0.1f*(meq3-meq5)+ -0.025f*(meq4-meq6) ;
//float feq7 = 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ -0.1f*(meq3+meq5)+ -0.025f*(meq4+meq6) ;
//float feq8 = 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ 0.1f*(meq3-meq5)+ 0.025f*(meq4-meq6) ;
//float feq9 = 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr) + 0.1f*(meq7-meq8);
//float feq10= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ 0.1f*(meq3+meq7)+ 0.025f*(meq4+meq8) ;
//float feq11= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr) + 0.1f*(meq5+meq7)+ 0.025f*(meq6+meq8);
//float feq12= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ -0.1f*(meq3-meq7)+ -0.025f*(meq4-meq8) ;
//float feq13= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr) + -0.1f*(meq5-meq7)+ -0.025f*(meq6-meq8);
//float feq14= 0.052631579f*rho+ -0.0045948204f*(-11.f*rho+19.f*usqr)+ -0.015873016f*(-7.53968254*usqr) + -0.1f*(meq7-meq8);
//float feq15= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ 0.1f*(meq3-meq7)+ 0.025f*(meq4-meq8) ;
//float feq16= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr) + 0.1f*(meq5-meq7)+ 0.025f*(meq6-meq8);
//float feq17= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ -0.1f*(meq3+meq7)+ -0.025f*(meq4+meq8) ;
//float feq18= 0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*(-7.53968254*usqr)+ + -0.1f*(meq5+meq7)+ -0.025f*(meq6+meq8);
//
//feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
//feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
//feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
//feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
//feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
//float u2 = u*u;
//float v2 = v*v;
//float w2 = w*w;
//float usqr = u2+v2+w2;
//
//
//f0 =SF*f0 +(1.0f-SF)*(0.1904761791f*rho+ -0.5971277356f*usqr);
//f1 =SF*f1 +(1.0f-SF)*(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2);
//f2 =SF*f2 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+ (0.166666667f*v) +-0.08333333f*u2+ 0.083333333f*(v2-w2));
//f3 =SF*f3 +(1.0f-SF)*(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2);
//f4 =SF*f4 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+-(0.166666667f*v) +-0.08333333f*u2+ 0.083333333f*(v2-w2));
//f5 =SF*f5 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v);
//f6 =SF*f6 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v);
//f7 =SF*f7 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v);
//f8 =SF*f8 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v);
//f9 =SF*f9 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+ (0.166666667f*w) +-0.08333333f*(u2 +(v2-w2)));//+ -0.083333333f*(v2-w2));
//f10=SF*f10+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w);
//f12=SF*f12+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w);
//f15=SF*f15+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w);
//f17=SF*f17+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w);
//f11=SF*f11+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+ 0.0833333333f*(v+w)+-0.16666667f*u2+ 0.25f*v*w);
//f13=SF*f13+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+-0.0833333333f*(v-w)+-0.16666667f*u2 -0.25f*v*w);
//f16=SF*f16+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+ 0.0833333333f*(v-w)+-0.16666667f*u2+ -0.25f*v*w);
//f18=SF*f18+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+-0.0833333333f*(v+w)+-0.16666667f*u2+ 0.25f*v*w);
//f14=SF*f14+(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+-(0.166666667f*w) +-0.08333333f*(u2 +(v2-w2)));//+ -0.083333333f*(v2-w2));
//f0 =SF*f0 +(1.0f-SF)*(0.1904761791f*rho+ -0.5971277356f*usqr);
//f1 =SF*f1 +(1.0f-SF)*(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2);
//f2 =SF*f2 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+ (0.166666667f*v) +-0.08333333f*u2+ 0.083333333f*(v2-w2));
//f3 =SF*f3 +(1.0f-SF)*(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2);
//f4 =SF*f4 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+-(0.166666667f*v) +-0.08333333f*u2+ 0.083333333f*(v2-w2));
//f5 =SF*f5 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(u+v)+ 0.08333333f*u2+ 0.083333333f*(v2-w2)+ 0.25f*u*v);
//f6 =SF*f6 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+-0.0833333333f*(u-v)+ 0.08333333f*u2+ 0.083333333f*(v2-w2)+ -0.25f*u*v);
//f7 =SF*f7 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+-0.0833333333f*(u+v)+ 0.08333333f*u2+ 0.083333333f*(v2-w2)+ 0.25f*u*v);
//f8 =SF*f8 +(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(u-v)+ 0.08333333f*u2+ 0.083333333f*(v2-w2)+ -0.25f*u*v);
//f9 =SF*f9 +(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+ (0.166666667f*w) +-0.08333333f*u2+ -0.083333333f*(v2-w2));
//f10=SF*f10+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(u+w)+ 0.08333333f*u2+ -0.083333333f*(v2-w2)+ 0.25f*u*w);
//f11=SF*f11+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+ 0.0833333333f*(v+w)+-0.16666667f*u2+ 0.25f*v*w);
//f12=SF*f12+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+-0.0833333333f*(u-w)+ 0.08333333f*u2+ -0.083333333f*(v2-w2)+ -0.25f*u*w);
//f13=SF*f13+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+-0.0833333333f*(v-w)+-0.16666667f*u2 -0.25f*v*w);
//f14=SF*f14+(1.0f-SF)*(0.1031746045f*rho+ -0.0045981388f*usqr+-(0.166666667f*w) +-0.08333333f*u2+ -0.083333333f*(v2-w2));
//f15=SF*f15+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+0.0833333333f*(u-w) + 0.08333333f*u2+ -0.083333333f*(v2-w2)+ -0.25f*u*w);
//f16=SF*f16+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+ 0.0833333333f*(v-w)+-0.16666667f*u2+ -0.25f*v*w);
//f17=SF*f17+(1.0f-SF)*(0.0158730149f*rho+ 0.00579491071f*usqr+-0.0833333333f*(u+w)+ 0.08333333f*u2+ -0.083333333f*(v2-w2)+ 0.25f*u*w);
//f18=SF*f18+(1.0f-SF)*(0.0158730149f*rho+ 0.02198286727f*usqr+-0.0833333333f*(v+w)+-0.16666667f*u2+ 0.25f*v*w);
}
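// mrt_scale_cf rescales a distribution set when transferring between grid levels:
//   f_i <- SF*f_i + (1-SF)*feq_i,
// i.e. the equilibrium part is kept and the non-equilibrium part f_i - feq_i is
// multiplied by the scale factor SF, which presumably absorbs the ratio of relaxation
// times and grid spacings between coarse and fine lattices. The feq_i used here include
// the second-order velocity terms added in the feqN += ... lines above.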
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch)
{
return (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*ZDIM;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch)
{
return (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*ZLRDIM;
}
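// f_mem / f_memLR flatten (x, y, z, f_num) into a 1D index on pitched memory:
//   index = x + y*pitch + z*YDIM*pitch + f_num*pitch*YDIM*ZDIM
// e.g. with a (hypothetical) pitch of 128 elements, node (1,2,3) of distribution f5 on
// the coarse grid maps to 1 + 2*128 + 3*32*128 + 5*128*32*32 = 667905.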
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__global__ void simple_copy(float* fA, float* fB,
size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int k = dmin(x+1,XDIM)+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
fB[j] = fA[k];//+0.01f;
}
__global__ void simple_text(float* fA, float* fB,
size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
fB[j] = tex2D(texRef_f0A,x+1,y);//+0.01f;
}
__global__ void ExtractFromC_d(float* fout,
size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(x < LRX0+1 || x > LRX0+XLRDIM-2 || y < LRY0+1 || y > LRY0+YLRDIM-2 || z < LRZ0+1 || z > LRZ0+ZLRDIM-2)
// //if(x < LRX0+2 || x > LRX0+XLRDIM-3 || y < LRY0+2 || y > LRY0+YLRDIM-3 || z < LRZ0+2 || z > LRZ0+ZLRDIM-3)
// {
// //do nothing
// }
// else{
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+ZLRDIM*LRFACTOR-1) &&
     (x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1) || z == int(LRZ0+2) || z == int(LRZ0+ZLRDIM*LRFACTOR-1)) )
{
// if(x > 10 && y > 10 && z > 10 && x < 20 && y < 20 && z < 20)
// {
float xcoord = LRLEVEL*(x-LRX0)+0.5f;
float ycoord = LRLEVEL*(y-LRY0)+0.5f;
float zcoord = LRLEVEL*(z-LRZ0);
int zminus = int(zcoord);
int zplus = zminus+1;
f0 = (zplus-zcoord)*tex2D(texRef_f0C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0C ,xcoord,ycoord+YLRDIM*(zplus));
f2 = (zplus-zcoord)*tex2D(texRef_f2C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2C ,xcoord,ycoord+YLRDIM*(zplus));
f4 = (zplus-zcoord)*tex2D(texRef_f4C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4C ,xcoord,ycoord+YLRDIM*(zplus));
f9 = (zplus-zcoord)*tex2D(texRef_f9C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9C ,xcoord,ycoord+YLRDIM*(zplus));
f11= (zplus-zcoord)*tex2D(texRef_f11C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11C,xcoord,ycoord+YLRDIM*(zplus));
f13= (zplus-zcoord)*tex2D(texRef_f13C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13C,xcoord,ycoord+YLRDIM*(zplus));
f14= (zplus-zcoord)*tex2D(texRef_f14C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14C,xcoord,ycoord+YLRDIM*(zplus));
f16= (zplus-zcoord)*tex2D(texRef_f16C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16C,xcoord,ycoord+YLRDIM*(zplus));
f18= (zplus-zcoord)*tex2D(texRef_f18C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18C,xcoord,ycoord+YLRDIM*(zplus));
f1 = (zplus-zcoord)*tex2D(texRef_f1C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1C ,xcoord,ycoord+YLRDIM*(zplus));
f3 = (zplus-zcoord)*tex2D(texRef_f3C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3C ,xcoord,ycoord+YLRDIM*(zplus));
f5 = (zplus-zcoord)*tex2D(texRef_f5C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5C ,xcoord,ycoord+YLRDIM*(zplus));
f6 = (zplus-zcoord)*tex2D(texRef_f6C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6C ,xcoord,ycoord+YLRDIM*(zplus));
f7 = (zplus-zcoord)*tex2D(texRef_f7C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7C ,xcoord,ycoord+YLRDIM*(zplus));
f8 = (zplus-zcoord)*tex2D(texRef_f8C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8C ,xcoord,ycoord+YLRDIM*(zplus));
f15= (zplus-zcoord)*tex2D(texRef_f15C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15C,xcoord,ycoord+YLRDIM*(zplus));
f17= (zplus-zcoord)*tex2D(texRef_f17C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17C,xcoord,ycoord+YLRDIM*(zplus));
f10= (zplus-zcoord)*tex2D(texRef_f10C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10C,xcoord,ycoord+YLRDIM*(zplus));
f12= (zplus-zcoord)*tex2D(texRef_f12C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12C,xcoord,ycoord+YLRDIM*(zplus));
mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF);
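//Judging from the commented-out block below, mrt_scale_cf presumably rescales each
//distribution toward its equilibrium with the scale factor SF,
//  f_i <- SF*f_i + (1-SF)*feq_i,
//the usual rescaling of the non-equilibrium part when transferring between grid levels.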
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
// f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
// float usqr = fma(u,u,fma(v,v,w*w));
//
//// float m1 -= -11.f*rho+19.f*(u*u+v*v+w*w);
//// float m4 = -0.66666667f*u;//qx_eq
//// float m6 = -0.66666667f*v;//qx_eq
//// float m8 = -0.66666667f*w;//qx_eq
//// float m9 -= (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
//// float m11-= (v*v-w*w);//pww_eq
//// float m13-= u*v;//pxy_eq
//// float m14-= v*w;//pyz_eq
//// float m15-= u*w;//pxz_eq
//
// if(MODEL == "MRT")
// {
// float meq0 = rho;
// float meq1 = -11.f*rho+19.f*(u*u+v*v+w*w);
// float meq2 = -7.53968254f*(u*u+v*v+w*w);
// float meq3 = u;
// float meq4 = -0.66666667f*u;//qx_eq
// float meq5 = v;
// float meq6 = -0.66666667f*v;//qx_eq
// float meq7 = w;
// float meq8 = -0.66666667f*w;//qx_eq
// float meq9 = (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
// float meq11= (v*v-w*w);//pww_eq
// float meq13= u*v;//pxy_eq
// float meq14= v*w;//pyz_eq
// float meq15= u*w;//pxz_eq
//
//float feq0 = 0.052631579f*meq0+ -0.012531328f*meq1+ 0.047619048f*meq2 ;
//float feq1 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ 0.1f*meq3+ -0.1f*meq4 ;
//float feq2 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*meq5+ -0.1f*meq6 ;
//float feq3 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ -0.1f*meq3+ 0.1f*meq4 ;
//float feq4 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*meq5+ 0.1f*meq6 ;
//float feq5 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4+ 0.1f*meq5+ 0.025f*meq6 ;
//float feq6 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4+ 0.1f*meq5+ 0.025f*meq6 ;
//float feq7 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4+ -0.1f*meq5+ -0.025f*meq6 ;
//float feq8 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4+ -0.1f*meq5+ -0.025f*meq6 ;
//float feq9 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*meq7+ -0.1f*meq8;
//float feq10= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4 + 0.1f*meq7+ 0.025f*meq8;
//float feq11= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*meq5+ 0.025f*meq6+ 0.1f*meq7+ 0.025f*meq8;
//float feq12= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4 + 0.1f*meq7+ 0.025f*meq8;
//float feq13= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + -0.1f*meq5+ -0.025f*meq6+ 0.1f*meq7+ 0.025f*meq8;
//float feq14= 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*meq7+ 0.1f*meq8;
//float feq15= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4 + -0.1f*meq7+ -0.025f*meq8;
//float feq16= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*meq5+ 0.025f*meq6+ -0.1f*meq7+ -0.025f*meq8;
//float feq17= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4 + -0.1f*meq7+ -0.025f*meq8;
//float feq18= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ + -0.1f*meq5+ -0.025f*meq6+ -0.1f*meq7+ -0.025f*meq8;
//
//feq1 += 0.055555556f*meq9;
//feq2 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq3 += 0.055555556f*meq9;
//feq4 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq5 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq6 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq7 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq8 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq9 += -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq10+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq11+= -0.055555556f*meq9 + 0.25f*meq14 ;
//feq12+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq13+= -0.055555556f*meq9 -0.25f*meq14 ;
//feq14+= -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq15+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq16+= -0.055555556f*meq9 + -0.25f*meq14 ;
//feq17+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq18+= -0.055555556f*meq9 + 0.25f*meq14 ;
//
//
//f0 =SF*f0 +(1.0f-SF)*feq0 ;
//f1 =SF*f1 +(1.0f-SF)*feq1 ;
//f2 =SF*f2 +(1.0f-SF)*feq2 ;
//f3 =SF*f3 +(1.0f-SF)*feq3 ;
//f4 =SF*f4 +(1.0f-SF)*feq4 ;
//f5 =SF*f5 +(1.0f-SF)*feq5 ;
//f6 =SF*f6 +(1.0f-SF)*feq6 ;
//f7 =SF*f7 +(1.0f-SF)*feq7 ;
//f8 =SF*f8 +(1.0f-SF)*feq8 ;
//f9 =SF*f9 +(1.0f-SF)*feq9 ;
//f10=SF*f10+(1.0f-SF)*feq10;
//f11=SF*f11+(1.0f-SF)*feq11;
//f12=SF*f12+(1.0f-SF)*feq12;
//f13=SF*f13+(1.0f-SF)*feq13;
//f14=SF*f14+(1.0f-SF)*feq14;
//f15=SF*f15+(1.0f-SF)*feq15;
//f16=SF*f16+(1.0f-SF)*feq16;
//f17=SF*f17+(1.0f-SF)*feq17;
//f18=SF*f18+(1.0f-SF)*feq18;
//
//
//
//
//
//
//// float m2 = -7.53968254f*(u*u+v*v+w*w);
//// //scale
//// f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
//// f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.01666666667f*(-u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.01666666667f*( u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.01666666667f*( u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.01666666667f*(-u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.01666666667f*(-u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.01666666667f*( u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
//// f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
//// f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.01666666667f*(-u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.01666666667f*( u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// }
// else
// {
//// //scale
////f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
////f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(u+0.666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
////f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(v+0.666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
////f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(-0.666666667f*u-u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
////f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(-0.666666667f*v-v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
////f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.025f*( -0.666666667f*u+-0.666666667f*v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
////f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.025f*(+0.666666667f*u+-0.666666667f*v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
////f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.025f*(+0.666666667f*u+0.666666667f*v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
////f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.025f*( -0.666666667f*u+0.666666667f*v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
////f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(w+0.666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
////f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.025f*( -0.666666667f*u+-0.666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
////f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.025f*( -0.666666667f*v+-0.666666667f*w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
////f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.025f*(+0.666666667f*u+-0.666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
////f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.025f*( -0.666666667f*v+-0.666666667f*w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
////f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +0.1f*(-0.666666667f*w-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
////f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.025f*( -0.666666667f*u+0.666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
////f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.025f*( -0.666666667f*v+0.666666667f*w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
////f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.025f*(+0.666666667f*u+0.666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
////f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.025f*( -0.666666667f*v+0.666666667f*w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.3333333333f*(rho-1.5f*usqr));
// f1 =SF*f1 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =SF*f2 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =SF*f3 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =SF*f4 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =SF*f5 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =SF*f6 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =SF*f7 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =SF*f8 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =SF*f9 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=SF*f10+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=SF*f11+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=SF*f12+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=SF*f13+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=SF*f14+(1.0f-SF)*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=SF*f15+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=SF*f16+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=SF*f17+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=SF*f18+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
// }
fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
fout[f_mem(10,x,y,z,pitch)] = f10;
fout[f_mem(11,x,y,z,pitch)] = f11;
fout[f_mem(12,x,y,z,pitch)] = f12;
fout[f_mem(13,x,y,z,pitch)] = f13;
fout[f_mem(14,x,y,z,pitch)] = f14;
fout[f_mem(15,x,y,z,pitch)] = f15;
fout[f_mem(16,x,y,z,pitch)] = f16;
fout[f_mem(17,x,y,z,pitch)] = f17;
fout[f_mem(18,x,y,z,pitch)] = f18;
}
}
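//LR_d_hybABCD_force appears to be intended as the fine-grid stream/collide step with
//force accumulation by momentum exchange, but in its current form it looks like a
//debugging variant: the neighbor loads and the collision branch are commented out, all
//distributions are hard-coded to 0.1f, the bounce-back swap is applied unconditionally,
//and the per-thread force sums then overwrite f1..f3 in fout.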
__global__ void LR_d_hybABCD_force(float* fin, float* fout,
float omega, size_t pitch, float *FX, float *FY, float *FZ, int t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
// __shared__ int check[1];
// check[0] = 0;
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
__syncthreads();
// if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2)
// {
// //dont do anything
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// }
// else{
// f0 = fin[j];
// f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
// f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
// f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
// f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
// f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
// f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
// f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
// if(z != ZLRDIM-1){
// f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
// }
// f1 = tex2D(texRef_f1C ,x-1+0.5f,y +0.5f+YLRDIM*(z));
// f3 = tex2D(texRef_f3C ,x+1+0.5f,y +0.5f+YLRDIM*(z));
// f5 = tex2D(texRef_f5C ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
// f6 = tex2D(texRef_f6C ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
// f7 = tex2D(texRef_f7C ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
// f8 = tex2D(texRef_f8C ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
// f15= tex2D(texRef_f15C,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
// f17= tex2D(texRef_f17C,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
// f10= tex2D(texRef_f10C,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
// f12= tex2D(texRef_f12C,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//
//
f1 = 0.1f;//fin[f_memLR(1 ,x,y,z,pitch)];
f3 = 0.1f;//fin[f_memLR(3 ,x,y,z,pitch)];
f2 = 0.1f;//fin[f_memLR(2 ,x,y,z,pitch)];
f5 = 0.1f;//fin[f_memLR(5 ,x,y,z,pitch)];
f6 = 0.1f;//fin[f_memLR(6 ,x,y,z,pitch)];
f4 = 0.1f;//fin[f_memLR(4 ,x,y,z,pitch)];
f7 = 0.1f;//fin[f_memLR(7 ,x,y,z,pitch)];
f8 = 0.1f;//fin[f_memLR(8 ,x,y,z,pitch)];
f9 = 0.1f;//fin[f_memLR(9 ,x,y,z,pitch)];
f10= 0.1f;//fin[f_memLR(10,x,y,z,pitch)];
f11= 0.1f;//fin[f_memLR(11,x,y,z,pitch)];
f12= 0.1f;//fin[f_memLR(12,x,y,z,pitch)];
f13= 0.1f;//fin[f_memLR(13,x,y,z,pitch)];
f14= 0.1f;//fin[f_memLR(14,x,y,z,pitch)];
f15= 0.1f;//fin[f_memLR(15,x,y,z,pitch)];
f16= 0.1f;//fin[f_memLR(16,x,y,z,pitch)];
f17= 0.1f;//fin[f_memLR(17,x,y,z,pitch)];
f18= 0.1f;//fin[f_memLR(18,x,y,z,pitch)];
__syncthreads();
// if(im == 1 || im ==10){//BB
// if(im == 10){
// check[0] = 1;
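//The sums below are the momentum-exchange force contributions of a bounce-back node:
//each reflected population f_i transfers momentum 2*f_i*e_i to the solid, so e.g.
//sumX = 2*sum_i f_i*e_ix over the D3Q19 directions with e_ix != 0.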
float temp = 2.f*(f9+f10+f11);
temp += 2.f*(-f14-f15-f16);
sumX[threadIdx.x]=2.f*f1-2.f*f3 +2.f*f5 +2.f*f8 -2.f*f6 -2.f*f7 +2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4 +2.f*f5 -2.f*f8 +2.f*f6 -2.f*f7 +2.f*f11-2.f*f13+2.f*f16-2.f*f18;
//sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
//sumZ[threadIdx.x]=2.f*(f9+f10+f11-f14-f15-f16);
sumZ[threadIdx.x]=temp;//f9+f10+f11;//-f15-f16;
//sumZ[threadIdx.x]+=-f14-f15-f16;
// }
// else{
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// }
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
__syncthreads();
fout[f_memLR(1 ,x,y,z,pitch)] = sumX[threadIdx.x];
fout[f_memLR(2 ,x,y,z,pitch)] = sumY[threadIdx.x];
fout[f_memLR(3 ,x,y,z,pitch)] = sumZ[threadIdx.x];
// }
// else{
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
//// if(im == 3)//DirichletWest
//// {
//// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//// }
//// else if(im == 13)//DirichletWest
//// {
//// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//// }
//// else if(im == 14)//DirichletWest
//// {
//// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//// }
// boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
//
//
// if(MODEL == "MRT")
// mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
// else if(MODEL == "BGK")
// bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
//
// fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
// fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
// fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
// fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
// fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
// fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
// fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
// fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
// fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
// fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
// fout[f_memLR(10,x,y,z,pitch)] = f10;
// fout[f_memLR(11,x,y,z,pitch)] = f11;
// fout[f_memLR(12,x,y,z,pitch)] = f12;
// fout[f_memLR(13,x,y,z,pitch)] = f13;
// fout[f_memLR(14,x,y,z,pitch)] = f14;
// fout[f_memLR(15,x,y,z,pitch)] = f15;
// fout[f_memLR(16,x,y,z,pitch)] = f16;
// fout[f_memLR(17,x,y,z,pitch)] = f17;
// fout[f_memLR(18,x,y,z,pitch)] = f18;
// }
// }//end else (not at edge of LR)
__syncthreads();
// if(check[0] == 1 && t>=STARTF){
// //reduction for force
// int nTotalThreads = blockDim.x;
// while(nTotalThreads > 1){
// int halfPoint = (nTotalThreads >> 1);
// if(threadIdx.x < halfPoint){
// sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
// sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
// sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
// }
// syncthreads();
// nTotalThreads = halfPoint;
// }
// if(threadIdx.x == 0){
// atomicAdd(&FX[t],sumX[0]);
// atomicAdd(&FY[t],sumY[0]);
// atomicAdd(&FZ[t],sumZ[0]);
// }
// }
}
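//LR_d_hybBACD_force appears to be the active force-computing variant of the fine-grid
//step: populations without an x component are streamed from linear memory (fin), those
//with an x component from the ..C textures. At solid bounce-back nodes (im==10) the
//momentum-exchange sums (see the note above) are accumulated into the shared arrays
//sumX/sumY/sumZ; other nodes collide with MRT or BGK. After a block-level reduction the
//totals are added atomically to FX/FY/FZ[t] once t >= STARTF. The z != ZLRDIM-1 guard on
//the f18 load looks redundant here, since the border check already excludes the top slice.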
__global__ void LR_d_hybBACD_force(float* fin, float* fout,
float omega, size_t pitch, float *FX, float *FY, float *FZ, int t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
__syncthreads();
if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2)
{
//dont do anything
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1C ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3C ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5C ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6C ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7C ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8C ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15C,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17C,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10C,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12C,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
__syncthreads();
if(check[0] == 1 && t>=STARTF){
//reduction for force
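//Standard shared-memory tree reduction over the block's x dimension; the active thread
//count is halved each pass, which implicitly assumes BLOCKSIZEX is a power of two.
//A minimal sketch of the same pattern:
//  for(int s = blockDim.x/2; s > 0; s >>= 1){
//    if(threadIdx.x < s) sum[threadIdx.x] += sum[threadIdx.x+s];
//    __syncthreads();
//  }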
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
__syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t],sumX[0]);
atomicAdd(&FY[t],sumY[0]);
atomicAdd(&FZ[t],sumZ[0]);
}
}
}
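//LR_d_hybABCD is the plain fine-grid hybrid stream/collide step (no force accumulation).
//Populations without an x component are streamed from fin, those with an x component from
//the ..C textures; nodes then either bounce back (im==1 or 10) or collide with MRT/BGK.
//Nodes within one cell of the LR-domain edge are skipped.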
__global__ void LR_d_hybABCD(float* fin, float* fout,
float omega, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2)
{
//dont do anything
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1C ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3C ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5C ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6C ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7C ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8C ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15C,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17C,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10C,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12C,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
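//LR_d_hybABCD2 is the same hybrid step as LR_d_hybABCD, except that the width of the
//skipped border is passed in as n instead of being fixed at one cell.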
__global__ void LR_d_hybABCD2(float* fin, float* fout,
float omega, size_t pitch, int n)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n)
{
//dont do anything
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1C ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3C ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5C ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6C ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7C ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8C ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15C,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17C,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10C,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12C,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
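//LR_d_hybABDC mirrors LR_d_hybABCD but reads the x-direction populations from the ..D
//textures (the other half of the ping-pong texture pair) and skips a two-cell border;
//the SF argument appears unused in the live code path.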
__global__ void LR_d_hybABDC(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//the +0.5f added at the texture fetches below is because textures are stored in a voxel-centered fashion; this should eventually be changed to vertex-centered
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed because z does not use texture interpolation
// int zminus = int(zcoord);
// int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < 2 || x > XLRDIM-3 || y < 2 || y > YLRDIM-3 || z < 2 || z > ZLRDIM-3)
{
//no interp
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
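//LR_d_hybABDC2 is the adjustable-border (n) variant of LR_d_hybABDC; SF again appears
//unused in the live code path.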
__global__ void LR_d_hybABDC2(float* fin, float* fout,
float omega, size_t pitch, float SF, int n)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//the +0.5f added at the texture fetches below is because textures are stored in a voxel-centered fashion; this should eventually be changed to vertex-centered
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed because z does not use texture interpolation
// int zminus = int(zcoord);
// int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n)
{
//no interp
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
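//LR_d_hybABDC_Interp handles the outer rim of the fine grid. For nodes within LRLEVEL
//cells of the LR-domain edge it reconstructs the distributions by interpolating the
//coarse-grid ..B textures (hardware bilinear in x-y, manual linear in z, with the coarse
//slices stacked using YDIM rather than YLRDIM) and rescales them with mrt_scale_cf(SF)
//before writing to fout. Interior nodes get the normal hybrid stream/collide step, as in
//LR_d_hybABDC.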
__global__ void LR_d_hybABDC_Interp(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//the +0.5f added at the texture fetches below is because textures are stored in a voxel-centered fashion; this should eventually be changed to vertex-centered
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed because z does not use texture interpolation
int zminus = int(zcoord);
int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL || z < LRLEVEL || z > ZLRDIM-1-LRLEVEL)
{
//interpolate for next time step. from B //YDIM and not YLRDIM
f0 = (zplus-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f2 = (zplus-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f4 = (zplus-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f9 = (zplus-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f11= (zplus-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f13= (zplus-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f14= (zplus-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f16= (zplus-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f18= (zplus-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f1 = (zplus-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f3 = (zplus-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f5 = (zplus-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f6 = (zplus-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f7 = (zplus-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f8 = (zplus-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f15= (zplus-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f17= (zplus-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f10= (zplus-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f12= (zplus-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF);
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
// f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
//// u = rho*u;
//// v = rho*v;
//// w = rho*w;
// float usqr = fma(u,u,fma(v,v,w*w));
//
// if(MODEL == "MRT")
// {
// float meq0 = rho;
// float meq1 = -11.f*rho+19.f*(u*u+v*v+w*w);
// float meq2 = -7.53968254f*(u*u+v*v+w*w);
// float meq3 = u;
// float meq4 = -0.66666667f*u;//qx_eq
// float meq5 = v;
// float meq6 = -0.66666667f*v;//qx_eq
// float meq7 = w;
// float meq8 = -0.66666667f*w;//qx_eq
// float meq9 = (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
// float meq11= (v*v-w*w);//pww_eq
// float meq13= u*v;//pxy_eq
// float meq14= v*w;//pyz_eq
// float meq15= u*w;//pxz_eq
//
////float feq0 = 0.052631579f*meq0+ -0.012531328f*meq1+ 0.047619048f*meq2 ;
////float feq1 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ (0.166666667f*u) ;
////float feq2 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + (0.166666667f*v) ;
////float feq3 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ -(0.166666667f*u) ;
////float feq4 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -(0.166666667f*v) ;
////float feq5 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq5)+ 0.025f*(meq4+meq6) ;
////float feq6 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq5)+ -0.025f*(meq4-meq6) ;
////float feq7 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq5)+ -0.025f*(meq4+meq6) ;
////float feq8 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq5)+ 0.025f*(meq4-meq6) ;
////float feq9 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + (0.166666667f*w);
////float feq10= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq7)+ 0.025f*(meq4+meq8) ;
////float feq11= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5+meq7)+ 0.025f*(meq6+meq8);
////float feq12= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq7)+ -0.025f*(meq4-meq8) ;
////float feq13= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + -0.1f*(meq5-meq7)+ -0.025f*(meq6-meq8);
////float feq14= 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -(0.166666667f*w);
////float feq15= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq7)+ 0.025f*(meq4-meq8) ;
////float feq16= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5-meq7)+ 0.025f*(meq6-meq8);
////float feq17= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq7)+ -0.025f*(meq4+meq8) ;
////float feq18= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ + -0.1f*(meq5+meq7)+ -0.025f*(meq6+meq8);
//
//float feq0 = 0.052631579f*meq0+ -0.012531328f*meq1+ 0.047619048f*meq2 ;
//float feq1 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ 0.1f*(meq3-meq4) ;
//float feq2 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*(meq5-meq6) ;
//float feq3 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ -0.1f*(meq3-meq4) ;
//float feq4 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*(meq5-meq6) ;
//float feq5 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq5)+ 0.025f*(meq4+meq6) ;
//float feq6 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq5)+ -0.025f*(meq4-meq6) ;
//float feq7 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq5)+ -0.025f*(meq4+meq6) ;
//float feq8 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq5)+ 0.025f*(meq4-meq6) ;
//float feq9 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*(meq7-meq8);
//float feq10= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq7)+ 0.025f*(meq4+meq8) ;
//float feq11= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5+meq7)+ 0.025f*(meq6+meq8);
//float feq12= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq7)+ -0.025f*(meq4-meq8) ;
//float feq13= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + -0.1f*(meq5-meq7)+ -0.025f*(meq6-meq8);
//float feq14= 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*(meq7-meq8);
//float feq15= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq7)+ 0.025f*(meq4-meq8) ;
//float feq16= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5-meq7)+ 0.025f*(meq6-meq8);
//float feq17= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq7)+ -0.025f*(meq4+meq8) ;
//float feq18= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ + -0.1f*(meq5+meq7)+ -0.025f*(meq6+meq8);
//
//feq1 += 0.055555556f*meq9;
//feq2 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq3 += 0.055555556f*meq9;
//feq4 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq5 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq6 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq7 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq8 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq9 += -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq10+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq11+= -0.055555556f*meq9 + 0.25f*meq14 ;
//feq12+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq13+= -0.055555556f*meq9 -0.25f*meq14 ;
//feq14+= -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq15+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq16+= -0.055555556f*meq9 + -0.25f*meq14 ;
//feq17+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq18+= -0.055555556f*meq9 + 0.25f*meq14 ;
//
//
//f0 =SF*f0 +(1.0f-SF)*feq0 ;
//f1 =SF*f1 +(1.0f-SF)*feq1 ;
//f2 =SF*f2 +(1.0f-SF)*feq2 ;
//f3 =SF*f3 +(1.0f-SF)*feq3 ;
//f4 =SF*f4 +(1.0f-SF)*feq4 ;
//f5 =SF*f5 +(1.0f-SF)*feq5 ;
//f6 =SF*f6 +(1.0f-SF)*feq6 ;
//f7 =SF*f7 +(1.0f-SF)*feq7 ;
//f8 =SF*f8 +(1.0f-SF)*feq8 ;
//f9 =SF*f9 +(1.0f-SF)*feq9 ;
//f10=SF*f10+(1.0f-SF)*feq10;
//f11=SF*f11+(1.0f-SF)*feq11;
//f12=SF*f12+(1.0f-SF)*feq12;
//f13=SF*f13+(1.0f-SF)*feq13;
//f14=SF*f14+(1.0f-SF)*feq14;
//f15=SF*f15+(1.0f-SF)*feq15;
//f16=SF*f16+(1.0f-SF)*feq16;
//f17=SF*f17+(1.0f-SF)*feq17;
//f18=SF*f18+(1.0f-SF)*feq18;
//
//
//
//
//
//
//// float m2 = -7.53968254f*(u*u+v*v+w*w);
//// //scale
//// f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
//// f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.01666666667f*(-u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.01666666667f*( u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.01666666667f*( u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.01666666667f*(-u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.01666666667f*(-u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.01666666667f*( u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
//// f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
//// f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.01666666667f*(-u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.01666666667f*( u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// }
// else
// {
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.3333333333f*(rho-1.5f*usqr));
// f1 =SF*f1 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =SF*f2 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =SF*f3 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =SF*f4 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =SF*f5 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =SF*f6 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =SF*f7 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =SF*f8 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =SF*f9 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=SF*f10+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=SF*f11+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=SF*f12+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=SF*f13+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=SF*f14+(1.0f-SF)*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=SF*f15+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=SF*f16+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=SF*f17+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=SF*f18+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
// }
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
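// Streaming: distributions with no x-offset were read above from global memory (fin);
// those with an x-offset are fetched below from 2D textures. The z slices are packed
// along the texture's y-axis, hence the y+0.5f+YLRDIM*(z...) addressing.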
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
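// Bounce-back at solid nodes: each distribution is written back along the opposite
// lattice direction (D3Q19 opposite pairs: 1<->3, 2<->4, 5<->7, 6<->8, 9<->14,
// 10<->17, 11<->18, 12<->15, 13<->16).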
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
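// LR_d_hybBADC: one update of the interior of the fine (LR) grid using a hybrid
// streaming scheme: y/z-offset distributions come from global memory (fin), x-offset
// distributions from the 'D' textures; solid nodes (im==1 or 10) are bounced back,
// fluid nodes are relaxed with mrt_collide/bgk_collide, and results go to fout.
// The two outermost layers on every side are skipped here (SF is accepted but unused
// in this kernel). The letter sequence in the name presumably encodes which A/B/C/D
// buffers this sub-step reads and writes; that is an inference from the naming only.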
__global__ void LR_d_hybBADC(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//+0.5f is added at texture fetches because textures are voxel-centered; this should eventually be changed to vertex-centered.
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed here: z does not use texture interpolation
// int zminus = int(zcoord);
// int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < 2 || x > XLRDIM-3 || y < 2 || y > YLRDIM-3 || z < 2 || z > ZLRDIM-3)
{
//no interp
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
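// LR_d_hybBADC2: identical to LR_d_hybBADC except that the width of the skipped
// boundary layer is the runtime argument n instead of the fixed value 2.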
__global__ void LR_d_hybBADC2(float* fin, float* fout,
float omega, size_t pitch, float SF, int n)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//+0.5f is added at texture fetches because textures are voxel-centered; this should eventually be changed to vertex-centered.
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed here: z does not use texture interpolation
// int zminus = int(zcoord);
// int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n)
{
//no interp
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
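// LR_d_hybBADC_Interp: like LR_d_hybBADC for interior nodes, but nodes within
// LRLEVEL cells of the LR boundary are instead reconstructed from the coarse grid:
// each distribution is fetched from the corresponding 'A' texture (hardware
// interpolation in x,y) and blended linearly in z between slices zminus and zplus,
// then rescaled for the coarse-to-fine transfer via mrt_scale_cf(...,SF).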
__global__ void LR_d_hybBADC_Interp(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//+0.5f is added at texture fetches because textures are voxel-centered; this should eventually be changed to vertex-centered.
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed here: z does not use texture interpolation
int zminus = int(zcoord);
int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL || z < LRLEVEL || z > ZLRDIM-1-LRLEVEL)
{
//interpolate for next time step from the coarse grid (A textures): linear blend in z between slices zminus and zplus
f0 = (zplus-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f2 = (zplus-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f4 = (zplus-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f9 = (zplus-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f11= (zplus-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f13= (zplus-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f14= (zplus-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f16= (zplus-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f18= (zplus-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f1 = (zplus-zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f3 = (zplus-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f5 = (zplus-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f6 = (zplus-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f7 = (zplus-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f8 = (zplus-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f15= (zplus-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f17= (zplus-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f10= (zplus-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f12= (zplus-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF);
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
// f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
//// u = rho*u;
//// v = rho*v;
//// w = rho*w;
// float usqr = fma(u,u,fma(v,v,w*w));
//
// if(MODEL == "MRT")
// {
// float meq0 = rho;
// float meq1 = -11.f*rho+19.f*(u*u+v*v+w*w);
// float meq2 = -7.53968254f*(u*u+v*v+w*w);
// float meq3 = u;
// float meq4 = -0.66666667f*u;//qx_eq
// float meq5 = v;
// float meq6 = -0.66666667f*v;//qx_eq
// float meq7 = w;
// float meq8 = -0.66666667f*w;//qx_eq
// float meq9 = (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
// float meq11= (v*v-w*w);//pww_eq
// float meq13= u*v;//pxy_eq
// float meq14= v*w;//pyz_eq
// float meq15= u*w;//pxz_eq
//
////float feq0 = 0.052631579f*meq0+ -0.012531328f*meq1+ 0.047619048f*meq2 ;
////float feq1 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ (0.166666667f*u) ;
////float feq2 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + (0.166666667f*v) ;
////float feq3 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ -(0.166666667f*u) ;
////float feq4 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -(0.166666667f*v) ;
////float feq5 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq5)+ 0.025f*(meq4+meq6) ;
////float feq6 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq5)+ -0.025f*(meq4-meq6) ;
////float feq7 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq5)+ -0.025f*(meq4+meq6) ;
////float feq8 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq5)+ 0.025f*(meq4-meq6) ;
////float feq9 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + (0.166666667f*w);
////float feq10= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq7)+ 0.025f*(meq4+meq8) ;
////float feq11= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5+meq7)+ 0.025f*(meq6+meq8);
////float feq12= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq7)+ -0.025f*(meq4-meq8) ;
////float feq13= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + -0.1f*(meq5-meq7)+ -0.025f*(meq6-meq8);
////float feq14= 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -(0.166666667f*w);
////float feq15= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq7)+ 0.025f*(meq4-meq8) ;
////float feq16= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*(meq5-meq7)+ 0.025f*(meq6-meq8);
////float feq17= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq7)+ -0.025f*(meq4+meq8) ;
////float feq18= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ + -0.1f*(meq5+meq7)+ -0.025f*(meq6+meq8);
//
//float feq0 = 0.052631579f*meq0+ -0.012531328f*meq1+ 0.047619048f*meq2 ;
//float feq1 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ 0.1f*(meq3-meq4) ;
//float feq2 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*(meq5-meq6) ;
//float feq3 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2+ -0.1f*(meq3-meq4) ;
//float feq4 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*(meq5-meq6) ;
//float feq5 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3+meq5)+ 0.025f*(meq4+meq6) ;
//float feq6 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3-meq5)+ -0.025f*(meq4-meq6) ;
//float feq7 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*(meq3+meq5)+ -0.025f*(meq4+meq6) ;
//float feq8 = 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*(meq3-meq5)+ 0.025f*(meq4-meq6) ;
//float feq9 = 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + 0.1f*meq7+ -0.1f*meq8;
//float feq10= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4 + 0.1f*meq7+ 0.025f*meq8;
//float feq11= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*meq5+ 0.025f*meq6+ 0.1f*meq7+ 0.025f*meq8;
//float feq12= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4 + 0.1f*meq7+ 0.025f*meq8;
//float feq13= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + -0.1f*meq5+ -0.025f*meq6+ 0.1f*meq7+ 0.025f*meq8;
//float feq14= 0.052631579f*meq0+ -0.0045948204f*meq1+ -0.015873016f*meq2 + -0.1f*meq7+ 0.1f*meq8;
//float feq15= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ 0.1f*meq3+ 0.025f*meq4 + -0.1f*meq7+ -0.025f*meq8;
//float feq16= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2 + 0.1f*meq5+ 0.025f*meq6+ -0.1f*meq7+ -0.025f*meq8;
//float feq17= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ -0.1f*meq3+ -0.025f*meq4 + -0.1f*meq7+ -0.025f*meq8;
//float feq18= 0.052631579f*meq0+ 0.0033416876f*meq1+ 0.003968254f*meq2+ + -0.1f*meq5+ -0.025f*meq6+ -0.1f*meq7+ -0.025f*meq8;
//
//feq1 += 0.055555556f*meq9;
//feq2 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq3 += 0.055555556f*meq9;
//feq4 += -0.027777778f*meq9+ 0.083333333f*meq11;
//feq5 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq6 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq7 += 0.027777778f*meq9+ 0.083333333f*meq11+ 0.25f*meq13 ;
//feq8 += 0.027777778f*meq9+ 0.083333333f*meq11+ -0.25f*meq13 ;
//feq9 += -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq10+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq11+= -0.055555556f*meq9 + 0.25f*meq14 ;
//feq12+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq13+= -0.055555556f*meq9 -0.25f*meq14 ;
//feq14+= -0.027777778f*meq9+ -0.083333333f*meq11 ;
//feq15+= 0.027777778f*meq9+ -0.083333333f*meq11 + -0.25f*meq15;
//feq16+= -0.055555556f*meq9 + -0.25f*meq14 ;
//feq17+= 0.027777778f*meq9+ -0.083333333f*meq11 + 0.25f*meq15;
//feq18+= -0.055555556f*meq9 + 0.25f*meq14 ;
//
//
//f0 =SF*f0 +(1.0f-SF)*feq0 ;
//f1 =SF*f1 +(1.0f-SF)*feq1 ;
//f2 =SF*f2 +(1.0f-SF)*feq2 ;
//f3 =SF*f3 +(1.0f-SF)*feq3 ;
//f4 =SF*f4 +(1.0f-SF)*feq4 ;
//f5 =SF*f5 +(1.0f-SF)*feq5 ;
//f6 =SF*f6 +(1.0f-SF)*feq6 ;
//f7 =SF*f7 +(1.0f-SF)*feq7 ;
//f8 =SF*f8 +(1.0f-SF)*feq8 ;
//f9 =SF*f9 +(1.0f-SF)*feq9 ;
//f10=SF*f10+(1.0f-SF)*feq10;
//f11=SF*f11+(1.0f-SF)*feq11;
//f12=SF*f12+(1.0f-SF)*feq12;
//f13=SF*f13+(1.0f-SF)*feq13;
//f14=SF*f14+(1.0f-SF)*feq14;
//f15=SF*f15+(1.0f-SF)*feq15;
//f16=SF*f16+(1.0f-SF)*feq16;
//f17=SF*f17+(1.0f-SF)*feq17;
//f18=SF*f18+(1.0f-SF)*feq18;
//
//
//
//
//
//
//// float m2 = -7.53968254f*(u*u+v*v+w*w);
//// //scale
//// f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
//// f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
//// f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
//// f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.01666666667f*(-u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.01666666667f*( u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.01666666667f*( u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
//// f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.01666666667f*(-u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
//// f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.01666666667f*(-u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.01666666667f*( u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
//// f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
//// f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
//// f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.01666666667f*(-u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
//// f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.01666666667f*( u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
//// f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// }
// else
// {
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.3333333333f*(rho-1.5f*usqr));
// f1 =SF*f1 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =SF*f2 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =SF*f3 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =SF*f4 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =SF*f5 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =SF*f6 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =SF*f7 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =SF*f8 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =SF*f9 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=SF*f10+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=SF*f11+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=SF*f12+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=SF*f13+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=SF*f14+(1.0f-SF)*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=SF*f15+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=SF*f16+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=SF*f17+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=SF*f18+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
// }
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
else{
f0 = fin[j];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1D ,x-1+0.5f,y +0.5f+YLRDIM*(z));
f3 = tex2D(texRef_f3D ,x+1+0.5f,y +0.5f+YLRDIM*(z));
f5 = tex2D(texRef_f5D ,x-1+0.5f,y-1+0.5f+YLRDIM*(z));
f6 = tex2D(texRef_f6D ,x+1+0.5f,y-1+0.5f+YLRDIM*(z));
f7 = tex2D(texRef_f7D ,x+1+0.5f,y+1+0.5f+YLRDIM*(z));
f8 = tex2D(texRef_f8D ,x-1+0.5f,y+1+0.5f+YLRDIM*(z));
f15= tex2D(texRef_f15D,x-1+0.5f,y +0.5f+YLRDIM*(z+1));
f17= tex2D(texRef_f17D,x+1+0.5f,y +0.5f+YLRDIM*(z+1));
f10= tex2D(texRef_f10D,x-1+0.5f,y +0.5f+YLRDIM*(z-1));
f12= tex2D(texRef_f12D,x+1+0.5f,y +0.5f+YLRDIM*(z-1));
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
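// LR_d_ABCD: plain LBM step on the fine (LR) grid with all 19 distributions streamed
// directly from global memory (fin); solid nodes are bounced back, fluid nodes are
// collided with MRT or BGK, and results are written to fout. Only the one-cell outer
// layer is skipped; no texture reads and no rescaling are involved.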
__global__ void LR_d_ABCD(float* fin, float* fout,
float omega, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
// if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3)
// im = -1;//not valid for extraction
// if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2)
// {
// im = -2;//not valid for second TS
// }
if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2)
{
//don't do anything (outside the LR interior)
}
else{
f0 = fin[j];
f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)];
f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)];
f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)];
f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f10= fin[f_memLR(10,x-1,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f12= fin[f_memLR(12,x+1,y ,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f15= fin[f_memLR(15,x-1,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
f17= fin[f_memLR(17,x+1,y ,z+1,pitch)];
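// dmin() clamps the +1 neighbour indices so the f18 read does not run past the far
// y/z edge (dmin appears to be a device-side min helper defined earlier in this file).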
f18= fin[f_memLR(18,x ,dmin(y+1,YLRDIM),dmin(z+1,ZLRDIM),pitch)];
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
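// LR_d_ABDC: interior nodes are streamed from global memory and collided as in
// LR_d_ABCD; nodes in the two-cell outer layer are instead interpolated from the
// coarse-grid 'B' textures (linear blend in z between slices zminus and zplus) and
// rescaled with mrt_scale_cf(...,SF).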
__global__ void LR_d_ABDC(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;//+0.5f is added at texture fetches because textures are voxel-centered; this should eventually be changed to vertex-centered.
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;//no +0.5f needed here: z does not use texture interpolation
int zminus = int(zcoord);
int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
// if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3)
// im = -1;//not valid for extraction
// if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2)
// {
// im = -2;//not valid for second TS
// }
if(x < 2 || x > XLRDIM-3 || y < 2 || y > YLRDIM-3 || z < 2 || z > ZLRDIM-3)
{
//interpolate for next time step from the coarse grid (B textures): linear blend in z between slices zminus and zplus
f0 = (zplus-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f2 = (zplus-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f4 = (zplus-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f9 = (zplus-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f11= (zplus-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f13= (zplus-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f14= (zplus-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f16= (zplus-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f18= (zplus-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f1 = (zplus-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f3 = (zplus-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f5 = (zplus-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f6 = (zplus-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f7 = (zplus-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f8 = (zplus-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f15= (zplus-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f17= (zplus-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f10= (zplus-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f12= (zplus-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF);
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
// f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
// float usqr = fma(u,u,fma(v,v,w*w));
//
// if(MODEL == "MRT")
// {
// float m2 = -7.53968254f*(u*u+v*v+w*w);
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
// f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
// f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
// f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.01666666667f*(-u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
// f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.01666666667f*( u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
// f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.01666666667f*( u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
// f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.01666666667f*(-u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
// f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
// f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.01666666667f*(-u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.01666666667f*( u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
// f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
// f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.01666666667f*(-u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.01666666667f*( u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// }
// else
// {
//
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.3333333333f*(rho-1.5f*usqr));
// f1 =SF*f1 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =SF*f2 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =SF*f3 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =SF*f4 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =SF*f5 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =SF*f6 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =SF*f7 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =SF*f8 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =SF*f9 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=SF*f10+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=SF*f11+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=SF*f12+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=SF*f13+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=SF*f14+(1.0f-SF)*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=SF*f15+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=SF*f16+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=SF*f17+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=SF*f18+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
// }
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
else{
f0 = fin[j];
f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)];
f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)];
f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)];
f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f10= fin[f_memLR(10,x-1,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f12= fin[f_memLR(12,x+1,y ,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f15= fin[f_memLR(15,x-1,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
f17= fin[f_memLR(17,x+1,y ,z+1,pitch)];
//if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,dmin(y+1,YLRDIM),dmin(z+1,ZLRDIM),pitch)];
//}
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
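// LR_d_BACD: same structure as LR_d_ABCD (global-memory streaming, bounce-back or
// MRT/BGK collision, one-cell outer layer skipped); judging by the naming convention,
// only the buffer pair it is launched on differs.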
__global__ void LR_d_BACD(float* fin, float* fout,
float omega, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
// if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3)
// im = -1;//not valid for extraction
// if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2)
// {
// im = -2;//not valid for second TS
// }
if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2)
{
//don't do anything (outside the LR interior)
}
else{
f0 = fin[j];
f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)];
f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)];
f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)];
f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f10= fin[f_memLR(10,x-1,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f12= fin[f_memLR(12,x+1,y ,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f15= fin[f_memLR(15,x-1,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
f17= fin[f_memLR(17,x+1,y ,z+1,pitch)];
f18= fin[f_memLR(18,x ,dmin(y+1,YLRDIM),dmin(z+1,ZLRDIM),pitch)];
if(im == 1 || im ==10){//BB
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
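// LR_d_BADC: counterpart of LR_d_ABDC that fills the two-cell outer layer from the
// coarse-grid 'A' textures (linear blend in z, then mrt_scale_cf with SF); interior
// nodes are streamed from global memory as in LR_d_ABDC.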
__global__ void LR_d_BADC(float* fin, float* fout,
float omega, size_t pitch, float SF)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int zminus = int(zcoord);
int zplus = zminus+1;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
// if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3)
// im = -1;//not valid for extraction
// if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2)
// {
// im = -2;//not valid for second TS
// }
if(x < 2 || x > XLRDIM-3 || y < 2 || y > YLRDIM-3 || z < 2 || z > ZLRDIM-3)
{
//interpolate for next time step from the coarse grid (A textures): linear blend in z between slices zminus and zplus
f0 = (zplus-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f2 = (zplus-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f4 = (zplus-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f9 = (zplus-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f11= (zplus-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f13= (zplus-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f14= (zplus-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f16= (zplus-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f18= (zplus-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f1 = (zplus-zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f3 = (zplus-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f5 = (zplus-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f6 = (zplus-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f7 = (zplus-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f8 = (zplus-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f15= (zplus-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f17= (zplus-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f10= (zplus-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
f12= (zplus-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus));
mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF);
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
// f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
// float usqr = fma(u,u,fma(v,v,w*w));
//
// if(MODEL == "MRT")
// {
// float m2 = -7.53968254f*(u*u+v*v+w*w);
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.052631579f*rho+ -0.012531328f*(-11.f*rho+19.f*usqr)+ 0.047619048f*m2);
// f1 =SF*f1 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
// f2 =SF*f2 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f3 =SF*f3 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*u) +0.055555556f*(2.f*u*u-(v*v+w*w)) );
// f4 =SF*f4 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*v) -0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f5 =SF*f5 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+v)+0.01666666667f*(-u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
// f6 =SF*f6 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+v)+0.01666666667f*( u-v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
// f7 =SF*f7 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-v)+0.01666666667f*( u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)+0.25f*(u*v) );
// f8 =SF*f8 +(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-v)+0.01666666667f*(-u+v) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w)-0.25f*(u*v) );
// f9 =SF*f9 +(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +( 0.1666666667f*w) -0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) );
// f10=SF*f10+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u+w)+0.01666666667f*(-u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f11=SF*f11+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// f12=SF*f12+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u+w)+0.01666666667f*( u-w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) -0.25f*u*v);
// f13=SF*f13+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v+w)+0.01666666667f*(-v-w) -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w );
// f14=SF*f14+(1.0f-SF)*(0.052631579f*rho+-0.0045948204f*(-11.f*rho+19.f*usqr)+-0.015873016f*m2 +(-0.1666666667f*w) +0.027777778f*(2.f*u*u-(v*v+w*w)) +0.0833333333f*(v*v-w*w) );
// f15=SF*f15+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( u-w)+0.01666666667f*(-u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f16=SF*f16+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*( v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// f17=SF*f17+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-u-w)+0.01666666667f*( u+w) +0.027777778f*(2.f*u*u-(v*v+w*w)) -0.0833333333f*(v*v-w*w) +0.25f*u*v);
// f18=SF*f18+(1.0f-SF)*(0.052631579f*rho+ 0.0033416876f*(-11.f*rho+19.f*usqr)+ 0.003968254f*m2 +0.1f*(-v-w)+0.01666666667f*(-v+w) -0.055555556f*(2.f*u*u-(v*v+w*w)) +0.25f*v*w );
// }
// else
// {
//
// //scale
// f0 =SF*f0 +(1.0f-SF)*(0.3333333333f*(rho-1.5f*usqr));
// f1 =SF*f1 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =SF*f2 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =SF*f3 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =SF*f4 +(1.0f-SF)*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =SF*f5 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =SF*f6 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =SF*f7 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =SF*f8 +(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =SF*f9 +(1.0f-SF)*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=SF*f10+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=SF*f11+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
// f12=SF*f12+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=SF*f13+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
// f14=SF*f14+(1.0f-SF)*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=SF*f15+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=SF*f16+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=SF*f17+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=SF*f18+(1.0f-SF)*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
// }
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
else{
f0 = fin[j];
f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)];
f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)];
f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)];
f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)];
f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)];
f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)];
f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)];
f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)];
f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)];
f10= fin[f_memLR(10,x-1,y ,z-1,pitch)];
f11= fin[f_memLR(11,x ,y-1,z-1,pitch)];
f12= fin[f_memLR(12,x+1,y ,z-1,pitch)];
f13= fin[f_memLR(13,x ,y+1,z-1,pitch)];
f14= fin[f_memLR(14,x ,y ,z+1,pitch)];
f15= fin[f_memLR(15,x-1,y ,z+1,pitch)];
f16= fin[f_memLR(16,x ,y-1,z+1,pitch)];
f17= fin[f_memLR(17,x+1,y ,z+1,pitch)];
//if(z != ZLRDIM-1){
f18= fin[f_memLR(18,x ,dmin(y+1,YLRDIM),dmin(z+1,ZLRDIM),pitch)];
//}
//else f18 = 0.1f;
if(im == 1 || im ==10){//BB
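//half-way bounce-back: each distribution is replaced by the one pointing in the opposite
//direction (1<->3, 2<->4, 5<->7, 6<->8, 9<->14, 10<->17, 11<->18, 12<->15, 13<->16).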
fout[f_memLR(1 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f14;
fout[f_memLR(10,x,y,z,pitch)] = f17;
fout[f_memLR(11,x,y,z,pitch)] = f18;
fout[f_memLR(12,x,y,z,pitch)] = f15;
fout[f_memLR(13,x,y,z,pitch)] = f16;
fout[f_memLR(14,x,y,z,pitch)] = f9 ;
fout[f_memLR(15,x,y,z,pitch)] = f12;
fout[f_memLR(16,x,y,z,pitch)] = f13;
fout[f_memLR(17,x,y,z,pitch)] = f10;
fout[f_memLR(18,x,y,z,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_memLR(0 ,x,y,z,pitch)] = f0 ;
fout[f_memLR(1 ,x,y,z,pitch)] = f1 ;
fout[f_memLR(2 ,x,y,z,pitch)] = f2 ;
fout[f_memLR(3 ,x,y,z,pitch)] = f3 ;
fout[f_memLR(4 ,x,y,z,pitch)] = f4 ;
fout[f_memLR(5 ,x,y,z,pitch)] = f5 ;
fout[f_memLR(6 ,x,y,z,pitch)] = f6 ;
fout[f_memLR(7 ,x,y,z,pitch)] = f7 ;
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ;
fout[f_memLR(9 ,x,y,z,pitch)] = f9 ;
fout[f_memLR(10,x,y,z,pitch)] = f10;
fout[f_memLR(11,x,y,z,pitch)] = f11;
fout[f_memLR(12,x,y,z,pitch)] = f12;
fout[f_memLR(13,x,y,z,pitch)] = f13;
fout[f_memLR(14,x,y,z,pitch)] = f14;
fout[f_memLR(15,x,y,z,pitch)] = f15;
fout[f_memLR(16,x,y,z,pitch)] = f16;
fout[f_memLR(17,x,y,z,pitch)] = f17;
fout[f_memLR(18,x,y,z,pitch)] = f18;
}
}//end else (not at edge of LR)
}
__global__ void mrt_d_hybAB_force(float* fin, float* fout,
float omega, size_t pitch, float *FX, float *FY, float *FZ, int t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;//;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
__syncthreads();
// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)))
// {
// }
// else{
f0 = fin[j];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
f14= fin[f_mem(14,x ,y ,z+1,pitch)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
if(z != ZDIM-1){
f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1A ,x-1+0.5f,y +0.5f+YDIM*(z));
f3 = tex2D(texRef_f3A ,x+1+0.5f,y +0.5f+YDIM*(z));
f5 = tex2D(texRef_f5A ,x-1+0.5f,y-1+0.5f+YDIM*(z));
f6 = tex2D(texRef_f6A ,x+1+0.5f,y-1+0.5f+YDIM*(z));
f7 = tex2D(texRef_f7A ,x+1+0.5f,y+1+0.5f+YDIM*(z));
f8 = tex2D(texRef_f8A ,x-1+0.5f,y+1+0.5f+YDIM*(z));
f15= tex2D(texRef_f15A,x-1+0.5f,y +0.5f+YDIM*(z+1));
f17= tex2D(texRef_f17A,x+1+0.5f,y +0.5f+YDIM*(z+1));
f10= tex2D(texRef_f10A,x-1+0.5f,y +0.5f+YDIM*(z-1));
f12= tex2D(texRef_f12A,x+1+0.5f,y +0.5f+YDIM*(z-1));
int im = ImageFcn(x,y,z);
if(im == 1 || im == 10){//BB
if(im == 10){
check[0] = 1;
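//momentum exchange: every link that bounces back off a solid node flagged im == 10 transfers
//2*f_i*e_i of momentum, so the per-thread force contribution is the signed sum of 2*f_i over
//the x, y and z components of the lattice velocities.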
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
fout[j+pitch*YDIM*ZDIM*9 ] = f14;
fout[j+pitch*YDIM*ZDIM*10] = f17;
fout[j+pitch*YDIM*ZDIM*11] = f18;
fout[j+pitch*YDIM*ZDIM*12] = f15;
fout[j+pitch*YDIM*ZDIM*13] = f16;
fout[j+pitch*YDIM*ZDIM*14] = f9 ;
fout[j+pitch*YDIM*ZDIM*15] = f12;
fout[j+pitch*YDIM*ZDIM*16] = f13;
fout[j+pitch*YDIM*ZDIM*17] = f10;
fout[j+pitch*YDIM*ZDIM*18] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
fout[f_mem(10,x,y,z,pitch)] = f10;
fout[f_mem(11,x,y,z,pitch)] = f11;
fout[f_mem(12,x,y,z,pitch)] = f12;
fout[f_mem(13,x,y,z,pitch)] = f13;
fout[f_mem(14,x,y,z,pitch)] = f14;
fout[f_mem(15,x,y,z,pitch)] = f15;
fout[f_mem(16,x,y,z,pitch)] = f16;
fout[f_mem(17,x,y,z,pitch)] = f17;
fout[f_mem(18,x,y,z,pitch)] = f18;
}
__syncthreads();
if(check[0] == 1 && t>=STARTF){
//reduction for force
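//pairwise (tree) reduction in shared memory: each pass folds the upper half of the block's
//partial sums into the lower half until thread 0 holds the block total, which is then
//accumulated into the global per-timestep force arrays with atomicAdd.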
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
__syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t],sumX[0]);
atomicAdd(&FY[t],sumY[0]);
atomicAdd(&FZ[t],sumZ[0]);
}
}
// }
}
__global__ void mrt_d_hybBA_force(float* fin, float* fout,
float omega, size_t pitch, float *FX, float *FY, float *FZ, int t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;//;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
__syncthreads();
// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)))
// {
// }
// else{
f0 = fin[j];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
f14= fin[f_mem(14,x ,y ,z+1,pitch)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
if(z != ZDIM-1){
f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1B ,x-1+0.5f,y +0.5f+YDIM*(z));
f3 = tex2D(texRef_f3B ,x+1+0.5f,y +0.5f+YDIM*(z));
f5 = tex2D(texRef_f5B ,x-1+0.5f,y-1+0.5f+YDIM*(z));
f6 = tex2D(texRef_f6B ,x+1+0.5f,y-1+0.5f+YDIM*(z));
f7 = tex2D(texRef_f7B ,x+1+0.5f,y+1+0.5f+YDIM*(z));
f8 = tex2D(texRef_f8B ,x-1+0.5f,y+1+0.5f+YDIM*(z));
f15= tex2D(texRef_f15B,x-1+0.5f,y +0.5f+YDIM*(z+1));
f17= tex2D(texRef_f17B,x+1+0.5f,y +0.5f+YDIM*(z+1));
f10= tex2D(texRef_f10B,x-1+0.5f,y +0.5f+YDIM*(z-1));
f12= tex2D(texRef_f12B,x+1+0.5f,y +0.5f+YDIM*(z-1));
int im = ImageFcn(x,y,z);
if(im == 1 || im == 10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
fout[j+pitch*YDIM*ZDIM*9 ] = f14;
fout[j+pitch*YDIM*ZDIM*10] = f17;
fout[j+pitch*YDIM*ZDIM*11] = f18;
fout[j+pitch*YDIM*ZDIM*12] = f15;
fout[j+pitch*YDIM*ZDIM*13] = f16;
fout[j+pitch*YDIM*ZDIM*14] = f9 ;
fout[j+pitch*YDIM*ZDIM*15] = f12;
fout[j+pitch*YDIM*ZDIM*16] = f13;
fout[j+pitch*YDIM*ZDIM*17] = f10;
fout[j+pitch*YDIM*ZDIM*18] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
fout[f_mem(10,x,y,z,pitch)] = f10;
fout[f_mem(11,x,y,z,pitch)] = f11;
fout[f_mem(12,x,y,z,pitch)] = f12;
fout[f_mem(13,x,y,z,pitch)] = f13;
fout[f_mem(14,x,y,z,pitch)] = f14;
fout[f_mem(15,x,y,z,pitch)] = f15;
fout[f_mem(16,x,y,z,pitch)] = f16;
fout[f_mem(17,x,y,z,pitch)] = f17;
fout[f_mem(18,x,y,z,pitch)] = f18;
}
__syncthreads();
if(check[0] == 1 && t>=STARTF){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
__syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t],sumX[0]);
atomicAdd(&FY[t],sumY[0]);
atomicAdd(&FZ[t],sumZ[0]);
}
}
// }
}
__global__ void mrt_d_hybAB(float* fin, float* fout,
float omega, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;//;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
// __shared__ int check[1];
// check[0] = 0;
// syncthreads();
// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)))
// {
// }
// else{
f0 = fin[j];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
f14= fin[f_mem(14,x ,y ,z+1,pitch)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
if(z != ZDIM-1){
f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1A ,x-1+0.5f,y +0.5f+YDIM*(z));
f3 = tex2D(texRef_f3A ,x+1+0.5f,y +0.5f+YDIM*(z));
f5 = tex2D(texRef_f5A ,x-1+0.5f,y-1+0.5f+YDIM*(z));
f6 = tex2D(texRef_f6A ,x+1+0.5f,y-1+0.5f+YDIM*(z));
f7 = tex2D(texRef_f7A ,x+1+0.5f,y+1+0.5f+YDIM*(z));
f8 = tex2D(texRef_f8A ,x-1+0.5f,y+1+0.5f+YDIM*(z));
f15= tex2D(texRef_f15A,x-1+0.5f,y +0.5f+YDIM*(z+1));
f17= tex2D(texRef_f17A,x+1+0.5f,y +0.5f+YDIM*(z+1));
f10= tex2D(texRef_f10A,x-1+0.5f,y +0.5f+YDIM*(z-1));
f12= tex2D(texRef_f12A,x+1+0.5f,y +0.5f+YDIM*(z-1));
int im = ImageFcn(x,y,z);
if(im == 1 || im == 10){//BB
// if(im == 10){
// check[0] = 1;
// sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
// sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
// sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
// }
// else{
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// }
fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
fout[j+pitch*YDIM*ZDIM*9 ] = f14;
fout[j+pitch*YDIM*ZDIM*10] = f17;
fout[j+pitch*YDIM*ZDIM*11] = f18;
fout[j+pitch*YDIM*ZDIM*12] = f15;
fout[j+pitch*YDIM*ZDIM*13] = f16;
fout[j+pitch*YDIM*ZDIM*14] = f9 ;
fout[j+pitch*YDIM*ZDIM*15] = f12;
fout[j+pitch*YDIM*ZDIM*16] = f13;
fout[j+pitch*YDIM*ZDIM*17] = f10;
fout[j+pitch*YDIM*ZDIM*18] = f11;
}
else{
// syncthreads();
// check[0] = 1;
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// if(im == 0){
// }
// else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
//// if(im == 15)//DirichletNorth
//// {
//// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//// }
//// if(im == 16)//DirichletSouth
//// {
//// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//// }
// if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
//// }
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
fout[f_mem(10,x,y,z,pitch)] = f10;
fout[f_mem(11,x,y,z,pitch)] = f11;
fout[f_mem(12,x,y,z,pitch)] = f12;
fout[f_mem(13,x,y,z,pitch)] = f13;
fout[f_mem(14,x,y,z,pitch)] = f14;
fout[f_mem(15,x,y,z,pitch)] = f15;
fout[f_mem(16,x,y,z,pitch)] = f16;
fout[f_mem(17,x,y,z,pitch)] = f17;
fout[f_mem(18,x,y,z,pitch)] = f18;
}
// syncthreads();
// if(check[0] == 1 && t>=STARTF){
// //reduction for force
// int nTotalThreads = blockDim.x;
// while(nTotalThreads > 1){
// int halfPoint = (nTotalThreads >> 1);
// if(threadIdx.x < halfPoint){
// sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
// sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
// sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
// }
// syncthreads();
// nTotalThreads = halfPoint;
// }
// if(threadIdx.x == 0){
// atomicAdd(&FX[t],sumX[0]);
// atomicAdd(&FY[t],sumY[0]);
// atomicAdd(&FZ[t],sumZ[0]);
// }
// }
// }
}
//{
// int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
// int y = threadIdx.y+blockIdx.y*blockDim.y;//;
// int z = threadIdx.z+blockIdx.z*blockDim.z;
// int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
// int im = ImageFcn(x,y,z);
// float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
//
//
//// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
//// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
//// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
//// (x>XDIM-1)))
//// {
//// }
//// else{
//
// f0 = fin[j];
//// f2 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)];
//// f4 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)];
//// f9 = fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)];
//// f11= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)];
//// f13= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)];
//// f14= fin[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)];
//// f16= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];
//// f18= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM),pitch)];
// f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
// f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
// f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
// f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
// f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
// f14= fin[f_mem(14,x ,y ,z+1,pitch)];
// f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
// if(z != ZDIM-1){
// f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
// }
// f1 = tex2D(texRef_f1A ,x-1+0.5f,y +0.5f+YDIM*(z));
// f3 = tex2D(texRef_f3A ,x+1+0.5f,y +0.5f+YDIM*(z));
// f5 = tex2D(texRef_f5A ,x-1+0.5f,y-1+0.5f+YDIM*(z));
// f6 = tex2D(texRef_f6A ,x+1+0.5f,y-1+0.5f+YDIM*(z));
// f7 = tex2D(texRef_f7A ,x+1+0.5f,y+1+0.5f+YDIM*(z));
// f8 = tex2D(texRef_f8A ,x-1+0.5f,y+1+0.5f+YDIM*(z));
// f15= tex2D(texRef_f15A,x-1+0.5f,y +0.5f+YDIM*(z+1));
// f17= tex2D(texRef_f17A,x+1+0.5f,y +0.5f+YDIM*(z+1));
// f10= tex2D(texRef_f10A,x-1+0.5f,y +0.5f+YDIM*(z-1));
// f12= tex2D(texRef_f12A,x+1+0.5f,y +0.5f+YDIM*(z-1));
//
//
// if(im == 1 || im ==10){//BB
// fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
// fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
// fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
// fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
// fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
// fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
// fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
// fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
// fout[j+pitch*YDIM*ZDIM*9 ] = f14;
// fout[j+pitch*YDIM*ZDIM*10] = f17;
// fout[j+pitch*YDIM*ZDIM*11] = f18;
// fout[j+pitch*YDIM*ZDIM*12] = f15;
// fout[j+pitch*YDIM*ZDIM*13] = f16;
// fout[j+pitch*YDIM*ZDIM*14] = f9 ;
// fout[j+pitch*YDIM*ZDIM*15] = f12;
// fout[j+pitch*YDIM*ZDIM*16] = f13;
// fout[j+pitch*YDIM*ZDIM*17] = f10;
// fout[j+pitch*YDIM*ZDIM*18] = f11;
//
//// fout[f_mem(1 ,x,y,z,pitch)] = f3 ;
//// fout[f_mem(2 ,x,y,z,pitch)] = f4 ;
//// fout[f_mem(3 ,x,y,z,pitch)] = f1 ;
//// fout[f_mem(4 ,x,y,z,pitch)] = f2 ;
//// fout[f_mem(5 ,x,y,z,pitch)] = f7 ;
//// fout[f_mem(6 ,x,y,z,pitch)] = f8 ;
//// fout[f_mem(7 ,x,y,z,pitch)] = f5 ;
//// fout[f_mem(8 ,x,y,z,pitch)] = f6 ;
//// fout[f_mem(9 ,x,y,z,pitch)] = f14;
//// fout[f_mem(10,x,y,z,pitch)] = f17;
//// fout[f_mem(11,x,y,z,pitch)] = f18;
//// fout[f_mem(12,x,y,z,pitch)] = f15;
//// fout[f_mem(13,x,y,z,pitch)] = f16;
//// fout[f_mem(14,x,y,z,pitch)] = f9 ;
//// fout[f_mem(15,x,y,z,pitch)] = f12;
//// fout[f_mem(16,x,y,z,pitch)] = f13;
//// fout[f_mem(17,x,y,z,pitch)] = f10;
//// fout[f_mem(18,x,y,z,pitch)] = f11;
// }
// else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
//
// if(MODEL == "MRT")
// mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
// else if(MODEL == "BGK")
// bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
//
//
// fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
// fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
// fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
// fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
// fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
// fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
// fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
// fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
// fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
// fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
// fout[f_mem(10,x,y,z,pitch)] = f10;
// fout[f_mem(11,x,y,z,pitch)] = f11;
// fout[f_mem(12,x,y,z,pitch)] = f12;
// fout[f_mem(13,x,y,z,pitch)] = f13;
// fout[f_mem(14,x,y,z,pitch)] = f14;
// fout[f_mem(15,x,y,z,pitch)] = f15;
// fout[f_mem(16,x,y,z,pitch)] = f16;
// fout[f_mem(17,x,y,z,pitch)] = f17;
// fout[f_mem(18,x,y,z,pitch)] = f18;
// }
//// }
//}
__global__ void mrt_d_hybBA(float* fin, float* fout,
float omega, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)))
// {
// }
// else{
f0 = fin[j];
// f2 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)];
// f4 = fin[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)];
// f9 = fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)];
// f11= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)];
// f13= fin[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)];
// f14= fin[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)];
// f16= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];
// f18= fin[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM),pitch)];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
f14= fin[f_mem(14,x ,y ,z+1,pitch)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
if(z != ZDIM-1){
f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
}
f1 = tex2D(texRef_f1B ,x-1+0.5f,y +0.5f+YDIM*(z));
f3 = tex2D(texRef_f3B ,x+1+0.5f,y +0.5f+YDIM*(z));
f5 = tex2D(texRef_f5B ,x-1+0.5f,y-1+0.5f+YDIM*(z));
f6 = tex2D(texRef_f6B ,x+1+0.5f,y-1+0.5f+YDIM*(z));
f7 = tex2D(texRef_f7B ,x+1+0.5f,y+1+0.5f+YDIM*(z));
f8 = tex2D(texRef_f8B ,x-1+0.5f,y+1+0.5f+YDIM*(z));
f15= tex2D(texRef_f15B,x-1+0.5f,y +0.5f+YDIM*(z+1));
f17= tex2D(texRef_f17B,x+1+0.5f,y +0.5f+YDIM*(z+1));
f10= tex2D(texRef_f10B,x-1+0.5f,y +0.5f+YDIM*(z-1));
f12= tex2D(texRef_f12B,x+1+0.5f,y +0.5f+YDIM*(z-1));
int im = ImageFcn(x,y,z);
if(im == 1 || im ==10){//BB
fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
fout[j+pitch*YDIM*ZDIM*9 ] = f14;
fout[j+pitch*YDIM*ZDIM*10] = f17;
fout[j+pitch*YDIM*ZDIM*11] = f18;
fout[j+pitch*YDIM*ZDIM*12] = f15;
fout[j+pitch*YDIM*ZDIM*13] = f16;
fout[j+pitch*YDIM*ZDIM*14] = f9 ;
fout[j+pitch*YDIM*ZDIM*15] = f12;
fout[j+pitch*YDIM*ZDIM*16] = f13;
fout[j+pitch*YDIM*ZDIM*17] = f10;
fout[j+pitch*YDIM*ZDIM*18] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
fout[f_mem(10,x,y,z,pitch)] = f10;
fout[f_mem(11,x,y,z,pitch)] = f11;
fout[f_mem(12,x,y,z,pitch)] = f12;
fout[f_mem(13,x,y,z,pitch)] = f13;
fout[f_mem(14,x,y,z,pitch)] = f14;
fout[f_mem(15,x,y,z,pitch)] = f15;
fout[f_mem(16,x,y,z,pitch)] = f16;
fout[f_mem(17,x,y,z,pitch)] = f17;
fout[f_mem(18,x,y,z,pitch)] = f18;
}
// }
}
__global__ void mrt_d_single(float* fA, float* fB,
float omega, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
&& y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
(x>XDIM-1)){
}
else{
f0 = fA[j];
// if(x != 0){
// f1 = fA[f_mem(1 ,x-1,y ,z ,pitch)];
// if(y != 0){
// f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch)];
// }
// if(y != YDIM-1){
// f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch)];
// }
// if(z != 0){
// f10= fA[f_mem(10,x-1,y ,z-1,pitch)];
// }
// if(z != ZDIM-1){
// f15= fA[f_mem(15,x-1,y ,z+1,pitch)];
// }
// }
//
// if(x != XDIM-1){
// f3 = fA[f_mem(3 ,x+1,y ,z ,pitch)];
// if(y != 0){
// f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch)];
// }
// if(y != YDIM-1){
// f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch)];
// }
// if(z != 0){
// f12= fA[f_mem(12,x+1,y ,z-1,pitch)];
// }
// if(z != ZDIM-1){
// f17= fA[f_mem(17,x+1,y ,z+1,pitch)];
// }
// }
//
// if(y != 0){
// f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)];
// if(z != 0){
// f11= fA[f_mem(11,x ,y-1,z-1,pitch)];
// }
// if(z != ZDIM-1){
// f16= fA[f_mem(16,x ,y-1,z+1,pitch)];
// }
// }
//
// if(y != YDIM-1){
// f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)];
// if(z != 0){
// f13= fA[f_mem(13,x ,y+1,z-1,pitch)];
// }
// if(z != ZDIM-1){
// f18= fA[f_mem(18,x ,y+1,z+1,pitch)];
// }
// }
//
// if(z != 0){
// f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)];
// }
// if(z != ZDIM-1){
// f14= fA[f_mem(14,x ,y ,z+1,pitch)];
// }
// int a = (x+y*pitch+z*YDIM*pitch)+pitch*YDIM*ZDIM;
// f1 = fA[a-1];//fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch)];
// a += pitch*YDIM*ZDIM;//+1-pitch;
// f2 = fA[a-pitch];//fA[f_me;//m(2 ,x ,dmax(y-1) ,z ,pitch)];
// a += pitch*YDIM*ZDIM;//+1+pitch;
// f3 = fA[a+1];//fA[f_me;//m(3 ,dmin(x+1,XDIM),y ,z ,pitch)];
// a += pitch*YDIM*ZDIM;//-1+pitch;
// f4 = fA[a+pitch];//fA[f_me;//m(4 ,x ,dmin(y+1,YDIM),z ,pitch)];
// a += pitch*YDIM*ZDIM;//-1-2*pitch;
// f5 = fA[a-1-pitch];//fA[f_me;//m(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch)];
// a += pitch*YDIM*ZDIM;//+2;
// f6 = fA[a+1-pitch];//fA[f_me;//m(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch)];
// a += pitch*YDIM*ZDIM;//+2*pitch;
// f7 = fA[a+1+pitch];//fA[f_me;//m(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch)];
// a += pitch*YDIM*ZDIM;//-2;
// f8 = fA[a-1+pitch];//fA[f_me;//m(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch)];
// a += pitch*YDIM*ZDIM;//+1-pitch-pitch*YDIM;
// f9 = fA[a];//fA[f_me;//m(9 ,x ,y ,dmax(z-1) ,pitch)];
// a += pitch*YDIM*ZDIM;//-1;
// f10= fA[a-1];//fA[f_me;//m(10,dmax(x-1) ,y ,dmax(z-1) ,pitch)];
// a += pitch*YDIM*ZDIM;//+1-pitch;
// f11= fA[a-pitch];//fA[f_me;//m(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)];
// a += pitch*YDIM*ZDIM;//+1+pitch;
// f12= fA[a+1];//fA[f_me;//m(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch)];
// a += pitch*YDIM*ZDIM;//-1+pitch;
// f13= fA[a+pitch];//fA[f_me;//m(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)];
// if(z == ZDIM-1){
// a += pitch*YDIM*ZDIM;//-pitch+pitch*YDIM;
// f14= fA[a+YDIM*pitch];//fA[f_me;//m(14,x ,y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//-1;
// f15= fA[a-1+YDIM*pitch];//fA[f_me;//m(15,dmax(x-1) ,y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//+1-pitch;
// f16= fA[a-pitch+YDIM*pitch];//fA[f_me;//m(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//+1+pitch;
// f17= fA[a+1+YDIM*pitch];//fA[f_me;//m(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//-1;//+pitch;
// f18= fA[a+pitch+YDIM*pitch];//fA[f_me;//m(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM),pitch)];
// }
// else{
// a += pitch*YDIM*ZDIM;//-pitch+2*pitch*YDIM;
// f14= fA[a-YDIM*pitch];//fA[f_me;//m(14,x ,y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//-1;
// f15= fA[a-1-YDIM*pitch];//fA[f_me;//m(15,dmax(x-1) ,y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//+1-pitch;
// f16= fA[a-pitch-YDIM*pitch];//fA[f_me;//m(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//+1+pitch;
// f17= fA[a+1-YDIM*pitch];//fA[f_me;//m(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM),pitch)];
// a += pitch*YDIM*ZDIM;//-1+pitch;
// f18= fA[a+pitch-YDIM*pitch];//fA[f_me;//m(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM),pitch)];
// }
// int a = f_mem(1,x,y,z,pitch);
// f1 = fA[a];
// f3 = fA[a];
// f2 = fA[a];
// f5 = fA[a];
// f6 = fA[a];
// f4 = fA[a];
// f7 = fA[a];
// f8 = fA[a];
// f9 = fA[a];
// f10= fA[a];
// f11= fA[a];
// f12= fA[a];
// f13= fA[a];
// f14= fA[a];
// f15= fA[a];
// f16= fA[a];
// f17= fA[a];
// f18= fA[a];
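//pull-style streaming: each post-streaming f_i is read from the neighbor it came from
//(e.g. f1 points in +x, so it is pulled from x-1); dmax/dmin clamp the neighbor indices
//at the domain edges.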
f1 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch)];
f3 = fA[f_mem(3 ,dmin(x+1,XDIM),y ,z ,pitch)];
f2 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch)];
f5 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch)];
f6 = fA[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch)];
f4 = fA[f_mem(4 ,x ,dmin(y+1,YDIM),z ,pitch)];
f7 = fA[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch)];
f8 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch)];
f9 = fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch)];
f10= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch)];
f11= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch)];
f12= fA[f_mem(12,dmin(x+1,XDIM),y ,dmax(z-1) ,pitch)];
f13= fA[f_mem(13,x ,dmin(y+1,YDIM),dmax(z-1) ,pitch)];
f14= fA[f_mem(14,x ,y ,dmin(z+1,ZDIM),pitch)];
f15= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,ZDIM),pitch)];
f16= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,ZDIM),pitch)];
f17= fA[f_mem(17,dmin(x+1,XDIM),y ,dmin(z+1,ZDIM),pitch)];
f18= fA[f_mem(18,x ,dmin(y+1,YDIM),dmin(z+1,ZDIM),pitch)];
if(im == 1 || im ==10){//BB
fB[f_mem(1 ,x,y,z,pitch)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch)] = f14;
fB[f_mem(10,x,y,z,pitch)] = f17;
fB[f_mem(11,x,y,z,pitch)] = f18;
fB[f_mem(12,x,y,z,pitch)] = f15;
fB[f_mem(13,x,y,z,pitch)] = f16;
fB[f_mem(14,x,y,z,pitch)] = f9 ;
fB[f_mem(15,x,y,z,pitch)] = f12;
fB[f_mem(16,x,y,z,pitch)] = f13;
fB[f_mem(17,x,y,z,pitch)] = f10;
fB[f_mem(18,x,y,z,pitch)] = f11;
}
else{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 21)//ysymm top
// {
// ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 22)//ysymm bot
// {
// ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
// }
// else if(im == 23)//zsymm top
// {
// zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
// else if(im == 24)//zsymm bot
// {
// zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
// }
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch)] = f9 ;
fB[f_mem(10,x,y,z,pitch)] = f10;
fB[f_mem(11,x,y,z,pitch)] = f11;
fB[f_mem(12,x,y,z,pitch)] = f12;
fB[f_mem(13,x,y,z,pitch)] = f13;
fB[f_mem(14,x,y,z,pitch)] = f14;
fB[f_mem(15,x,y,z,pitch)] = f15;
fB[f_mem(16,x,y,z,pitch)] = f16;
fB[f_mem(17,x,y,z,pitch)] = f17;
fB[f_mem(18,x,y,z,pitch)] = f18;
}
}
}
__global__ void initialize_single(float *f, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
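//D3Q19 equilibrium, feq_i = w_i*(rho + 3*(e_i.u) + 4.5*(e_i.u)^2 - 1.5*|u|^2),
//with w = 1/3 for the rest direction, 1/18 for the six axis directions and 1/36 for the
//twelve diagonal directions.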
f[j+0 *pitch*YDIM*ZDIM]= 1.0f/3.0f*(rho-1.5f*usqr);
f[j+1 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+2 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+3 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+4 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+5 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f[j+6 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f[j+7 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f[j+8 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f[j+9 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+10*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f[j+11*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f[j+12*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f[j+13*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f[j+14*pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+15*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f[j+16*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f[j+17*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f[j+18*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
float feq0 = 0.1904761791f*rho;
float feq1 = 0.1031746045f*rho;
float feq2 = 0.1031746045f*rho;
float feq3 = 0.1031746045f*rho;
float feq4 = 0.1031746045f*rho;
float feq5 = 0.0158730149f*rho;
float feq6 = 0.0158730149f*rho;
float feq7 = 0.0158730149f*rho;
float feq8 = 0.0158730149f*rho;
float feq9 = 0.1031746045f*rho;
float feq10= 0.0158730149f*rho;
float feq11= 0.0158730149f*rho;
float feq12= 0.0158730149f*rho;
float feq13= 0.0158730149f*rho;
float feq14= 0.1031746045f*rho;
float feq15= 0.0158730149f*rho;
float feq16= 0.0158730149f*rho;
float feq17= 0.0158730149f*rho;
float feq18= 0.0158730149f*rho;
//overwrite the equilibrium values above with the constant feq weights, using the coarse-grid
//stride (YDIM*ZDIM) consistent with the index j of this kernel
f[j+0 *pitch*YDIM*ZDIM]= feq0 ;
f[j+1 *pitch*YDIM*ZDIM]= feq1 ;
f[j+2 *pitch*YDIM*ZDIM]= feq2 ;
f[j+3 *pitch*YDIM*ZDIM]= feq3 ;
f[j+4 *pitch*YDIM*ZDIM]= feq4 ;
f[j+5 *pitch*YDIM*ZDIM]= feq5 ;
f[j+6 *pitch*YDIM*ZDIM]= feq6 ;
f[j+7 *pitch*YDIM*ZDIM]= feq7 ;
f[j+8 *pitch*YDIM*ZDIM]= feq8 ;
f[j+9 *pitch*YDIM*ZDIM]= feq9 ;
f[j+10*pitch*YDIM*ZDIM]= feq10;
f[j+11*pitch*YDIM*ZDIM]= feq11;
f[j+12*pitch*YDIM*ZDIM]= feq12;
f[j+13*pitch*YDIM*ZDIM]= feq13;
f[j+14*pitch*YDIM*ZDIM]= feq14;
f[j+15*pitch*YDIM*ZDIM]= feq15;
f[j+16*pitch*YDIM*ZDIM]= feq16;
f[j+17*pitch*YDIM*ZDIM]= feq17;
f[j+18*pitch*YDIM*ZDIM]= feq18;
if(x == XDIM-1){
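//zero the padded columns between XDIM and the pitch so the padding added by cudaMallocPitch
//does not hold uninitialized data.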
for(int i = XDIM; i<pitch; i++){
j = i+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
f[j+0 *pitch*YDIM*ZDIM]=0.f;
f[j+1 *pitch*YDIM*ZDIM]=0.f;
f[j+2 *pitch*YDIM*ZDIM]=0.f;
f[j+3 *pitch*YDIM*ZDIM]=0.f;
f[j+4 *pitch*YDIM*ZDIM]=0.f;
f[j+5 *pitch*YDIM*ZDIM]=0.f;
f[j+6 *pitch*YDIM*ZDIM]=0.f;
f[j+7 *pitch*YDIM*ZDIM]=0.f;
f[j+8 *pitch*YDIM*ZDIM]=0.f;
f[j+9 *pitch*YDIM*ZDIM]=0.f;
f[j+10*pitch*YDIM*ZDIM]=0.f;
f[j+11*pitch*YDIM*ZDIM]=0.f;
f[j+12*pitch*YDIM*ZDIM]=0.f;
f[j+13*pitch*YDIM*ZDIM]=0.f;
f[j+14*pitch*YDIM*ZDIM]=0.f;
f[j+15*pitch*YDIM*ZDIM]=0.f;
f[j+16*pitch*YDIM*ZDIM]=0.f;
f[j+17*pitch*YDIM*ZDIM]=0.f;
f[j+18*pitch*YDIM*ZDIM]=0.f;
}
}
}
__global__ void initialize_LR(float *f, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
// f[j+0 *pitch*YLRDIM*ZLRDIM]= 1.0f/3.0f*(rho-1.5f*usqr);
// f[j+1 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// f[j+2 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// f[j+3 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// f[j+4 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f[j+5 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// f[j+6 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// f[j+7 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f[j+8 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// f[j+9 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// f[j+10*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
// f[j+11*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
// f[j+12*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// f[j+13*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// f[j+14*pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// f[j+15*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
// f[j+16*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
// f[j+17*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// f[j+18*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
float feq0 = 0.1904761791f*rho;
float feq1 = 0.1031746045f*rho;
float feq2 = 0.1031746045f*rho;
float feq3 = 0.1031746045f*rho;
float feq4 = 0.1031746045f*rho;
float feq5 = 0.0158730149f*rho;
float feq6 = 0.0158730149f*rho;
float feq7 = 0.0158730149f*rho;
float feq8 = 0.0158730149f*rho;
float feq9 = 0.1031746045f*rho;
float feq10= 0.0158730149f*rho;
float feq11= 0.0158730149f*rho;
float feq12= 0.0158730149f*rho;
float feq13= 0.0158730149f*rho;
float feq14= 0.1031746045f*rho;
float feq15= 0.0158730149f*rho;
float feq16= 0.0158730149f*rho;
float feq17= 0.0158730149f*rho;
float feq18= 0.0158730149f*rho;
f[j+0 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq0 ;
f[j+1 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq1 ;
f[j+2 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq2 ;
f[j+3 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq3 ;
f[j+4 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq4 ;
f[j+5 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq5 ;
f[j+6 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq6 ;
f[j+7 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq7 ;
f[j+8 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq8 ;
f[j+9 *pitch*YLRDIM*ZLRDIM]= 0.1f;//feq9 ;
f[j+10*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq10;
f[j+11*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq11;
f[j+12*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq12;
f[j+13*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq13;
f[j+14*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq14;
f[j+15*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq15;
f[j+16*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq16;
f[j+17*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq17;
f[j+18*pitch*YLRDIM*ZLRDIM]= 0.1f;//feq18;
}
__global__ void initialize(float* f0, float* f1, float* f2,
float* f3, float* f4, float* f5,
float* f6, float* f7, float* f8, float* f9,
float* f10, float* f11, float* f12,
float* f13, float* f14, float* f15,
float* f16, float* f17, float* f18,
size_t pitch)//pitch in elements
//__global__ void initialize(void** f0in, void** f1in,
// int w, int h, int pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
// int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
float u,v,w,rho,feq,usqr;
rho = 1.0f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
feq = 1.0f/3.0f*(rho-1.5f*usqr);
f0[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f1[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f2[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f3[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f4[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f5[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f6[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f7[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f8[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f9[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f10[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f11[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f12[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f13[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f14[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f15[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f16[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f17[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f18[j] = feq;
}
int main(int argc, char *argv[])
{
//int *image_d, *image_h;
ofstream output;
ofstream output2;
string FileName = CASENAME;
//output.open ("LBM1_out.dat");
output.open ((FileName+".dat").c_str());
output2.open ((FileName+".force").c_str());
size_t memsize, memsize2;
size_t pitch = 0;
size_t pitch2 = 0;
int i, n, nBlocks, nBlocks2, n2;
float omega, CharLength, omega2;
if(fabs(LRFACTOR-1.f/LRLEVEL)>0.001f){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
CharLength = OBSTR*2.f;
omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega2-1.0f));
}
float SF_cf = omega*(1.0f-omega2)/((1.0f-omega)*omega2/LRFACTOR);
float SF_fc = 1.f/SF_cf;
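//omega = 1/(3*nu + 0.5) with nu = UMAX*CharLength/RE; omega2 is the corresponding relaxation
//rate on the refined grid, and SF_cf / SF_fc rescale the non-equilibrium part of the
//distributions when they are transferred coarse-to-fine / fine-to-coarse.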
cout<<"omega: "<<omega<<endl;
cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
cout<<"LRblocksize: "<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl;
cout<<"LRgrid: "<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
cout<<"TMAX: "<<TMAX<<endl;
cout<<"Method: "<<METHOD<<endl;
cout<<"Model: "<<MODEL<<endl;
nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*(YDIM/BLOCKSIZEY+YDIM%BLOCKSIZEY)
*(ZDIM/BLOCKSIZEZ+ZDIM%BLOCKSIZEZ);
nBlocks2 = (XLRDIM/BLOCKSIZELRX+XLRDIM%BLOCKSIZELRX)*(YLRDIM/BLOCKSIZELRY+YLRDIM%BLOCKSIZELRY)
*(ZLRDIM/BLOCKSIZELRZ+ZLRDIM%BLOCKSIZELRZ);
int B = BLOCKSIZEX*BLOCKSIZEY*BLOCKSIZEZ;
int B2 = BLOCKSIZELRX*BLOCKSIZELRY*BLOCKSIZELRZ;
n = nBlocks*B;
n2 = nBlocks2*B2;
cout<<"nBlocks:"<<nBlocks<<endl;
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),YDIM/BLOCKSIZEY,ZDIM/BLOCKSIZEZ);
dim3 threads2(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 grid2(XLRDIM/BLOCKSIZELRX,YLRDIM/BLOCKSIZELRY,ZLRDIM/BLOCKSIZELRZ);
memsize = n*sizeof(float);
//memsize_int = n*sizeof(int);
memsize2 = n2*sizeof(float);
//cudaExtent extent = make_cudaExtent(XDIM*sizeof(float),YDIM,ZDIM);
//image_h = (int *)malloc(memsize_int);
float *fA_h,*fA_d,*fB_d,*fC_h,*fC_d,*fD_d;
float *FX_h,*FY_h,*FZ_h,*FX_d,*FY_d,*FZ_d;
fA_h = (float *)malloc(memsize*19);
fC_h = (float *)malloc(memsize2*19);
FX_h = (float *)malloc(TMAX*sizeof(float));
FY_h = (float *)malloc(TMAX*sizeof(float));
FZ_h = (float *)malloc(TMAX*sizeof(float));
cudaMallocPitch((void **) &fA_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19);
cudaMallocPitch((void **) &fB_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19);
if(REFINEMENT == "YES"){
cudaMallocPitch((void **) &fC_d, &pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM*19);
cudaMallocPitch((void **) &fD_d, &pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM*19);
}
cudaMalloc((void **) &FX_d, TMAX*sizeof(float));
cudaMalloc((void **) &FY_d, TMAX*sizeof(float));
cudaMalloc((void **) &FZ_d, TMAX*sizeof(float));
cout<<pitch<<", "<<pitch2<<endl;
size_t pitch_elements = pitch/sizeof(float);
size_t pitch_elements2 = pitch2/sizeof(float);
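//cudaMallocPitch returns the pitch in bytes; the kernels index with the pitch expressed in
//elements (floats), so it is converted here.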
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
for (i = 0; i < n*19; i++)
{
fA_h[i] = i;
}
for (i = 0; i < n2*19; i++)
{
fC_h[i] = 0;
}
for (i = 0; i < TMAX; i++){
FX_h[i] = 0.f;
FY_h[i] = 0.f;
FZ_h[i] = 0.f;
}
cudaMemcpy(FX_d, FX_h, TMAX*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(FY_d, FY_h, TMAX*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(FZ_d, FZ_h, TMAX*sizeof(float), cudaMemcpyHostToDevice);
// for (i = 0; i < n; i++)
// {
// int x = i%XDIM;
// int y = (i/XDIM)%YDIM;
// int z = (i/XDIM)/YDIM;
//// image_h[i] = 0;
//// if(x < 1) image_h[i] = 1;//DirichletWest
//// if(x > XDIM-2) image_h[i] = 1;//BB
//// if(y < 1) image_h[i] = 1;//BB
//// if(y > YDIM-2) image_h[i] = 1;//BB
//// if(z < 1) image_h[i] = 1;//DirichletWest
//// if(z > ZDIM-2) image_h[i] = 1;//BB
// }
//cudaMemcpy(image_d, image_h, memsize_int, cudaMemcpyHostToDevice);
if(true)//texture settings
{
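//textures are addressed with unnormalized coordinates and linear filtering: a fetch at an
//integer coordinate +0.5f returns the stored nodal value, while fractional coordinates
//(used by the grid-refinement interpolation) are bilinearly interpolated.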
texRef_f0B.normalized = false;
texRef_f1B.normalized = false;
texRef_f2B.normalized = false;
texRef_f3B.normalized = false;
texRef_f4B.normalized = false;
texRef_f5B.normalized = false;
texRef_f6B.normalized = false;
texRef_f7B.normalized = false;
texRef_f8B.normalized = false;
texRef_f9B.normalized = false;
texRef_f10B.normalized = false;
texRef_f11B.normalized = false;
texRef_f12B.normalized = false;
texRef_f13B.normalized = false;
texRef_f14B.normalized = false;
texRef_f15B.normalized = false;
texRef_f16B.normalized = false;
texRef_f17B.normalized = false;
texRef_f18B.normalized = false;
texRef_f0B.filterMode = cudaFilterModeLinear;
texRef_f1B.filterMode = cudaFilterModeLinear;
texRef_f2B.filterMode = cudaFilterModeLinear;
texRef_f3B.filterMode = cudaFilterModeLinear;
texRef_f4B.filterMode = cudaFilterModeLinear;
texRef_f5B.filterMode = cudaFilterModeLinear;
texRef_f6B.filterMode = cudaFilterModeLinear;
texRef_f7B.filterMode = cudaFilterModeLinear;
texRef_f8B.filterMode = cudaFilterModeLinear;
texRef_f9B.filterMode = cudaFilterModeLinear;
texRef_f10B.filterMode = cudaFilterModeLinear;
texRef_f11B.filterMode = cudaFilterModeLinear;
texRef_f12B.filterMode = cudaFilterModeLinear;
texRef_f13B.filterMode = cudaFilterModeLinear;
texRef_f14B.filterMode = cudaFilterModeLinear;
texRef_f15B.filterMode = cudaFilterModeLinear;
texRef_f16B.filterMode = cudaFilterModeLinear;
texRef_f17B.filterMode = cudaFilterModeLinear;
texRef_f18B.filterMode = cudaFilterModeLinear;
texRef_f0A.normalized = false;
texRef_f1A.normalized = false;
texRef_f2A.normalized = false;
texRef_f3A.normalized = false;
texRef_f4A.normalized = false;
texRef_f5A.normalized = false;
texRef_f6A.normalized = false;
texRef_f7A.normalized = false;
texRef_f8A.normalized = false;
texRef_f9A.normalized = false;
texRef_f10A.normalized = false;
texRef_f11A.normalized = false;
texRef_f12A.normalized = false;
texRef_f13A.normalized = false;
texRef_f14A.normalized = false;
texRef_f15A.normalized = false;
texRef_f16A.normalized = false;
texRef_f17A.normalized = false;
texRef_f18A.normalized = false;
texRef_f0A.filterMode = cudaFilterModeLinear;
texRef_f1A.filterMode = cudaFilterModeLinear;
texRef_f2A.filterMode = cudaFilterModeLinear;
texRef_f3A.filterMode = cudaFilterModeLinear;
texRef_f4A.filterMode = cudaFilterModeLinear;
texRef_f5A.filterMode = cudaFilterModeLinear;
texRef_f6A.filterMode = cudaFilterModeLinear;
texRef_f7A.filterMode = cudaFilterModeLinear;
texRef_f8A.filterMode = cudaFilterModeLinear;
texRef_f9A.filterMode = cudaFilterModeLinear;
texRef_f10A.filterMode = cudaFilterModeLinear;
texRef_f11A.filterMode = cudaFilterModeLinear;
texRef_f12A.filterMode = cudaFilterModeLinear;
texRef_f13A.filterMode = cudaFilterModeLinear;
texRef_f14A.filterMode = cudaFilterModeLinear;
texRef_f15A.filterMode = cudaFilterModeLinear;
texRef_f16A.filterMode = cudaFilterModeLinear;
texRef_f17A.filterMode = cudaFilterModeLinear;
texRef_f18A.filterMode = cudaFilterModeLinear;
// if(REFINEMENT == "YES"){
texRef_f0C.normalized = false;
texRef_f1C.normalized = false;
texRef_f2C.normalized = false;
texRef_f3C.normalized = false;
texRef_f4C.normalized = false;
texRef_f5C.normalized = false;
texRef_f6C.normalized = false;
texRef_f7C.normalized = false;
texRef_f8C.normalized = false;
texRef_f9C.normalized = false;
texRef_f10C.normalized = false;
texRef_f11C.normalized = false;
texRef_f12C.normalized = false;
texRef_f13C.normalized = false;
texRef_f14C.normalized = false;
texRef_f15C.normalized = false;
texRef_f16C.normalized = false;
texRef_f17C.normalized = false;
texRef_f18C.normalized = false;
texRef_f0C.filterMode = cudaFilterModeLinear;
texRef_f1C.filterMode = cudaFilterModeLinear;
texRef_f2C.filterMode = cudaFilterModeLinear;
texRef_f3C.filterMode = cudaFilterModeLinear;
texRef_f4C.filterMode = cudaFilterModeLinear;
texRef_f5C.filterMode = cudaFilterModeLinear;
texRef_f6C.filterMode = cudaFilterModeLinear;
texRef_f7C.filterMode = cudaFilterModeLinear;
texRef_f8C.filterMode = cudaFilterModeLinear;
texRef_f9C.filterMode = cudaFilterModeLinear;
texRef_f10C.filterMode = cudaFilterModeLinear;
texRef_f11C.filterMode = cudaFilterModeLinear;
texRef_f12C.filterMode = cudaFilterModeLinear;
texRef_f13C.filterMode = cudaFilterModeLinear;
texRef_f14C.filterMode = cudaFilterModeLinear;
texRef_f15C.filterMode = cudaFilterModeLinear;
texRef_f16C.filterMode = cudaFilterModeLinear;
texRef_f17C.filterMode = cudaFilterModeLinear;
texRef_f18C.filterMode = cudaFilterModeLinear;
texRef_f0D.normalized = false;
texRef_f1D.normalized = false;
texRef_f2D.normalized = false;
texRef_f3D.normalized = false;
texRef_f4D.normalized = false;
texRef_f5D.normalized = false;
texRef_f6D.normalized = false;
texRef_f7D.normalized = false;
texRef_f8D.normalized = false;
texRef_f9D.normalized = false;
texRef_f10D.normalized = false;
texRef_f11D.normalized = false;
texRef_f12D.normalized = false;
texRef_f13D.normalized = false;
texRef_f14D.normalized = false;
texRef_f15D.normalized = false;
texRef_f16D.normalized = false;
texRef_f17D.normalized = false;
texRef_f18D.normalized = false;
texRef_f0D.filterMode = cudaFilterModeLinear;
texRef_f1D.filterMode = cudaFilterModeLinear;
texRef_f2D.filterMode = cudaFilterModeLinear;
texRef_f3D.filterMode = cudaFilterModeLinear;
texRef_f4D.filterMode = cudaFilterModeLinear;
texRef_f5D.filterMode = cudaFilterModeLinear;
texRef_f6D.filterMode = cudaFilterModeLinear;
texRef_f7D.filterMode = cudaFilterModeLinear;
texRef_f8D.filterMode = cudaFilterModeLinear;
texRef_f9D.filterMode = cudaFilterModeLinear;
texRef_f10D.filterMode = cudaFilterModeLinear;
texRef_f11D.filterMode = cudaFilterModeLinear;
texRef_f12D.filterMode = cudaFilterModeLinear;
texRef_f13D.filterMode = cudaFilterModeLinear;
texRef_f14D.filterMode = cudaFilterModeLinear;
texRef_f15D.filterMode = cudaFilterModeLinear;
texRef_f16D.filterMode = cudaFilterModeLinear;
texRef_f17D.filterMode = cudaFilterModeLinear;
texRef_f18D.filterMode = cudaFilterModeLinear;
// }
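// Clamp out-of-range texture fetches at the domain boundaries for the A and B texture sets.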
for(int i = 0; i<2; i++){
texRef_f0A.addressMode[i] = cudaAddressModeClamp;
texRef_f1A.addressMode[i] = cudaAddressModeClamp;
texRef_f2A.addressMode[i] = cudaAddressModeClamp;
texRef_f3A.addressMode[i] = cudaAddressModeClamp;
texRef_f4A.addressMode[i] = cudaAddressModeClamp;
texRef_f5A.addressMode[i] = cudaAddressModeClamp;
texRef_f6A.addressMode[i] = cudaAddressModeClamp;
texRef_f7A.addressMode[i] = cudaAddressModeClamp;
texRef_f8A.addressMode[i] = cudaAddressModeClamp;
texRef_f9A.addressMode[i] = cudaAddressModeClamp;
texRef_f10A.addressMode[i] = cudaAddressModeClamp;
texRef_f11A.addressMode[i] = cudaAddressModeClamp;
texRef_f12A.addressMode[i] = cudaAddressModeClamp;
texRef_f13A.addressMode[i] = cudaAddressModeClamp;
texRef_f14A.addressMode[i] = cudaAddressModeClamp;
texRef_f15A.addressMode[i] = cudaAddressModeClamp;
texRef_f16A.addressMode[i] = cudaAddressModeClamp;
texRef_f17A.addressMode[i] = cudaAddressModeClamp;
texRef_f18A.addressMode[i] = cudaAddressModeClamp;
texRef_f0B.addressMode[i] = cudaAddressModeClamp;
texRef_f1B.addressMode[i] = cudaAddressModeClamp;
texRef_f2B.addressMode[i] = cudaAddressModeClamp;
texRef_f3B.addressMode[i] = cudaAddressModeClamp;
texRef_f4B.addressMode[i] = cudaAddressModeClamp;
texRef_f5B.addressMode[i] = cudaAddressModeClamp;
texRef_f6B.addressMode[i] = cudaAddressModeClamp;
texRef_f7B.addressMode[i] = cudaAddressModeClamp;
texRef_f8B.addressMode[i] = cudaAddressModeClamp;
texRef_f9B.addressMode[i] = cudaAddressModeClamp;
texRef_f10B.addressMode[i] = cudaAddressModeClamp;
texRef_f11B.addressMode[i] = cudaAddressModeClamp;
texRef_f12B.addressMode[i] = cudaAddressModeClamp;
texRef_f13B.addressMode[i] = cudaAddressModeClamp;
texRef_f14B.addressMode[i] = cudaAddressModeClamp;
texRef_f15B.addressMode[i] = cudaAddressModeClamp;
texRef_f16B.addressMode[i] = cudaAddressModeClamp;
texRef_f17B.addressMode[i] = cudaAddressModeClamp;
texRef_f18B.addressMode[i] = cudaAddressModeClamp;
}
}
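// Upload the initial host data; both buffers of each ping-pong pair (A/B, and C/D when refinement
// is enabled) start from the same host array.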
cudaMemcpy2D(fA_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(fB_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyHostToDevice);
if(REFINEMENT == "YES"){
cudaMemcpy2D(fC_d,pitch2,fC_h,XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(fD_d,pitch2,fC_h,XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,cudaMemcpyHostToDevice);
}
// for (i = 0; i < n*19; i++)
// {
// fA_h[i] = 0;
// fC_h[i] = 1;
// }
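// Bind each distribution component to its texture, offset by one pitched YDIM*ZDIM (or
// YLRDIM*ZLRDIM) slab per component.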
if(true)//bind texture
{
cudaBindTexture2D(0,&texRef_f0A, fA_d ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f1A, fA_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f2A, fA_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f3A, fA_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f4A, fA_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f5A, fA_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f6A, fA_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f7A, fA_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f8A, fA_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f9A, fA_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f10A,fA_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f11A,fA_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f12A,fA_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f13A,fA_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f14A,fA_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f15A,fA_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f16A,fA_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f17A,fA_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f18A,fA_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f0B, fB_d ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f1B, fB_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f2B, fB_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f3B, fB_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f4B, fB_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f5B, fB_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f6B, fB_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f7B, fB_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f8B, fB_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f9B, fB_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f10B,fB_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f11B,fB_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f12B,fB_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f13B,fB_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f14B,fB_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f15B,fB_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f16B,fB_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f17B,fB_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch);
cudaBindTexture2D(0,&texRef_f18B,fB_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch);
// if(REFINEMENT == "YES"){
cudaBindTexture2D(0,&texRef_f0C, fC_d ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f1C, fC_d+pitch_elements2*YLRDIM*ZLRDIM ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f2C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*2 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f3C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*3 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f4C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*4 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f5C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*5 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f6C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*6 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f7C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*7 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f8C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*8 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f9C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*9 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f10C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*10,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f11C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*11,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f12C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*12,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f13C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*13,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f14C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*14,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f15C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*15,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f16C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*16,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f17C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*17,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f18C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*18,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f0D, fD_d ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f1D, fD_d+pitch_elements2*YLRDIM*ZLRDIM ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f2D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*2 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f3D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*3 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f4D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*4 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f5D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*5 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f6D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*6 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f7D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*7 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f8D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*8 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f9D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*9 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f10D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*10,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f11D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*11,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f12D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*12,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f13D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*13,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f14D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*14,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f15D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*15,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f16D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*16,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f17D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*17,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
cudaBindTexture2D(0,&texRef_f18D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*18,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2);
// }
}
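// Initialize the distributions on the device for the coarse grid and, if enabled, the refined grid.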
initialize_single<<<grid, threads>>>(fA_d,pitch_elements);
initialize_single<<<grid, threads>>>(fB_d,pitch_elements);
if(REFINEMENT == "YES"){
initialize_LR<<<grid2, threads2>>>(fC_d,pitch_elements2);
initialize_LR<<<grid2, threads2>>>(fD_d,pitch_elements2);
}
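// Prefer L1 cache over shared memory for the collision/streaming kernels.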
cudaFuncSetCacheConfig(mrt_d_single,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(mrt_d_hybAB,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(mrt_d_hybAB_force,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(LR_d_hybABCD_force,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(LR_d_hybABCD,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(LR_d_hybABCD2,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(LR_d_hybABDC2,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(LR_d_hybABDC_Interp,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(mrt_d_hybBA,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(mrt_d_hybBA_force,cudaFuncCachePreferL1);
// cudaFuncSetCacheConfig(LR_d_hybBACD,cudaFuncCachePreferL1);
// cudaFuncSetCacheConfig(LR_d_hybBACD2,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(LR_d_hybBADC2,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(LR_d_hybBADC_Interp,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(ExtractFromC_d,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(simple_copy,cudaFuncCachePreferL1);
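// Time the main time-stepping loop with wall-clock timers.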
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
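// Main time loop: each pass advances two LBM steps, ping-ponging between the A and B buffers
// (and C and D on the refined grid).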
for(int t = 0; t<TMAX; t=t+2){
if(METHOD == "SINGLE"){
mrt_d_single<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements);
if(REFINEMENT == "YES"){
LR_d_ABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2);
LR_d_ABDC<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf);
ExtractFromC_d<<<grid, threads>>>(fB_d,pitch_elements,SF_fc);
}
mrt_d_single<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements);
if(REFINEMENT == "YES"){
LR_d_BACD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2);
LR_d_BADC<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf);
ExtractFromC_d<<<grid, threads>>>(fA_d,pitch_elements,SF_fc);
}
}
else if(METHOD == "HYB"){
if(t >= STARTF && REFINEMENT == "NO")
mrt_d_hybAB_force<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t);
else
mrt_d_hybAB<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements);
if(REFINEMENT == "YES"){
if(LRFACTOR == 0.5f)
{
if(t >= STARTF)
LR_d_hybABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t);
else
LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2);
// LR_d_hybABDC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf);
}
else if(LRFACTOR == 0.25f)
{
if(t >= STARTF)
LR_d_hybABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t);
else
LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2);
LR_d_hybABDC2<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,2);
LR_d_hybABCD2<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,3);
LR_d_hybABDC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf);
}
ExtractFromC_d<<<grid, threads>>>(fB_d,pitch_elements,SF_fc);
}
if(t >= STARTF && REFINEMENT == "NO")
mrt_d_hybBA_force<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t+1);
else
mrt_d_hybBA<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements);
if(REFINEMENT == "YES"){
if(LRFACTOR == 0.5f)
{
// if(t >= STARTF)
// LR_d_hybABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1);
// else
// LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2);
//
// LR_d_hybBADC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf);
}
else if(LRFACTOR == 0.25f)
{
if(t >= STARTF)
LR_d_hybABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1);
else
LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2);
LR_d_hybBADC2<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,2);
LR_d_hybABCD2<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,3);
LR_d_hybBADC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf);
}
ExtractFromC_d<<<grid, threads>>>(fA_d,pitch_elements,SF_fc);
}
}
// else if(METHOD == "TEXT"){
// mrt_d_textAB<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements);
// mrt_d_textBA<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements);
// }
//
// else if(METHOD == "SHARED"){
// mrt_d_shared<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements);
// mrt_d_shared<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements);
// }
// simple_copy<<<grid, threads>>>(fA_d,fB_d,pitch_elements);
// simple_copy<<<grid, threads>>>(fB_d,fA_d,pitch_elements);
//
// simple_text<<<grid, threads>>>(fA_d,fB_d,pitch_elements);
// simple_text<<<grid, threads>>>(fB_d,fA_d,pitch_elements);
if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n";
}
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
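// Report performance in MLUPS (million lattice-node updates per second) over all lattice nodes.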
int Nodes;
if(REFINEMENT == "YES"){
Nodes = (XDIM*YDIM*ZDIM+XLRDIM*YLRDIM*ZLRDIM*LRLEVEL);
}
else{
Nodes = XDIM*YDIM*ZDIM;
}
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)"<<endl;
// <<double((XDIM*YDIM*ZDIM)*double(TMAX/1000000.f))/restime<<"MLUPS)"<<endl;
cout<<XDIM<<","<<YDIM<<","<<ZDIM<<","<<TMAX<<","<<restime<<endl;
if(true){
cudaUnbindTexture(texRef_f0A);
cudaUnbindTexture(texRef_f1A);
cudaUnbindTexture(texRef_f2A);
cudaUnbindTexture(texRef_f3A);
cudaUnbindTexture(texRef_f4A);
cudaUnbindTexture(texRef_f5A);
cudaUnbindTexture(texRef_f6A);
cudaUnbindTexture(texRef_f7A);
cudaUnbindTexture(texRef_f8A);
cudaUnbindTexture(texRef_f9A);
cudaUnbindTexture(texRef_f10A);
cudaUnbindTexture(texRef_f11A);
cudaUnbindTexture(texRef_f12A);
cudaUnbindTexture(texRef_f13A);
cudaUnbindTexture(texRef_f14A);
cudaUnbindTexture(texRef_f15A);
cudaUnbindTexture(texRef_f16A);
cudaUnbindTexture(texRef_f17A);
cudaUnbindTexture(texRef_f18A);
cudaUnbindTexture(texRef_f0B);
cudaUnbindTexture(texRef_f1B);
cudaUnbindTexture(texRef_f2B);
cudaUnbindTexture(texRef_f3B);
cudaUnbindTexture(texRef_f4B);
cudaUnbindTexture(texRef_f5B);
cudaUnbindTexture(texRef_f6B);
cudaUnbindTexture(texRef_f7B);
cudaUnbindTexture(texRef_f8B);
cudaUnbindTexture(texRef_f9B);
cudaUnbindTexture(texRef_f10B);
cudaUnbindTexture(texRef_f11B);
cudaUnbindTexture(texRef_f12B);
cudaUnbindTexture(texRef_f13B);
cudaUnbindTexture(texRef_f14B);
cudaUnbindTexture(texRef_f15B);
cudaUnbindTexture(texRef_f16B);
cudaUnbindTexture(texRef_f17B);
cudaUnbindTexture(texRef_f18B);
cudaUnbindTexture(texRef_f0C);
cudaUnbindTexture(texRef_f1C);
cudaUnbindTexture(texRef_f2C);
cudaUnbindTexture(texRef_f3C);
cudaUnbindTexture(texRef_f4C);
cudaUnbindTexture(texRef_f5C);
cudaUnbindTexture(texRef_f6C);
cudaUnbindTexture(texRef_f7C);
cudaUnbindTexture(texRef_f8C);
cudaUnbindTexture(texRef_f9C);
cudaUnbindTexture(texRef_f10C);
cudaUnbindTexture(texRef_f11C);
cudaUnbindTexture(texRef_f12C);
cudaUnbindTexture(texRef_f13C);
cudaUnbindTexture(texRef_f14C);
cudaUnbindTexture(texRef_f15C);
cudaUnbindTexture(texRef_f16C);
cudaUnbindTexture(texRef_f17C);
cudaUnbindTexture(texRef_f18C);
cudaUnbindTexture(texRef_f0D);
cudaUnbindTexture(texRef_f1D);
cudaUnbindTexture(texRef_f2D);
cudaUnbindTexture(texRef_f3D);
cudaUnbindTexture(texRef_f4D);
cudaUnbindTexture(texRef_f5D);
cudaUnbindTexture(texRef_f6D);
cudaUnbindTexture(texRef_f7D);
cudaUnbindTexture(texRef_f8D);
cudaUnbindTexture(texRef_f9D);
cudaUnbindTexture(texRef_f10D);
cudaUnbindTexture(texRef_f11D);
cudaUnbindTexture(texRef_f12D);
cudaUnbindTexture(texRef_f13D);
cudaUnbindTexture(texRef_f14D);
cudaUnbindTexture(texRef_f15D);
cudaUnbindTexture(texRef_f16D);
cudaUnbindTexture(texRef_f17D);
cudaUnbindTexture(texRef_f18D);
}
cudaMemcpy2D(fA_h,XDIM*sizeof(float),fA_d,pitch,XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyDeviceToHost);
if(REFINEMENT == "YES"){
cudaMemcpy2D(fC_h,XLRDIM*sizeof(float),fD_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,cudaMemcpyDeviceToHost);
}
cudaMemcpy(FX_h, FX_d, TMAX*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(FY_h, FY_d, TMAX*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(FZ_h, FZ_d, TMAX*sizeof(float), cudaMemcpyDeviceToHost);
//cudaMemcpy(image_h, image_d, memsize_int, cudaMemcpyDeviceToHost);
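// Write the coarse-grid solution in Tecplot point format: rho is the sum of the 19 distributions,
// and u, v, w are their first moments along the D3Q19 lattice directions.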
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n";
int row = 0;
int col = 0;
int dep = 0;
i = 0;
float rho, u, v, w;
int j;
for(dep = 0; dep<ZDIM; dep++){
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = dep*XDIM*YDIM+row*XDIM+col;
// rho = 0;
rho = fA_h[i];
for(j = 1; j<19; j++)
rho+=fA_h[i+XDIM*YDIM*ZDIM*j];
// rho = f0_h[i]+f1_h[i]+f2_h[i]+f3_h[i]+f4_h[i]+f5_h[i]+f6_h[i]+f7_h[i]+f8_h[i]+f9_h[i]+
// f10_h[i]+f11_h[i]+f12_h[i]+f13_h[i]+f14_h[i]+f15_h[i]+f16_h[i]+f17_h[i]+f18_h[i];
u = fA_h[i+XDIM*YDIM*ZDIM*1]-fA_h[i+XDIM*YDIM*ZDIM*3]+fA_h[i+XDIM*YDIM*ZDIM*5]-fA_h[i+XDIM*YDIM*ZDIM*6]-
fA_h[i+XDIM*YDIM*ZDIM*7]+fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*10]-fA_h[i+XDIM*YDIM*ZDIM*12]
+fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*17];
v = fA_h[i+XDIM*YDIM*ZDIM*2]-fA_h[i+XDIM*YDIM*ZDIM*4]+fA_h[i+XDIM*YDIM*ZDIM*5]+fA_h[i+XDIM*YDIM*ZDIM*6]-fA_h[i+XDIM*YDIM*ZDIM*7]-fA_h[i+XDIM*YDIM*ZDIM*8]+fA_h[i+XDIM*YDIM*ZDIM*11]-fA_h[i+XDIM*YDIM*ZDIM*13]+fA_h[i+XDIM*YDIM*ZDIM*16]-fA_h[i+XDIM*YDIM*ZDIM*18];
w = fA_h[i+XDIM*YDIM*ZDIM*9]+fA_h[i+XDIM*YDIM*ZDIM*10]+fA_h[i+XDIM*YDIM*ZDIM*11]+fA_h[i+XDIM*YDIM*ZDIM*12]+fA_h[i+XDIM*YDIM*ZDIM*13]-fA_h[i+XDIM*YDIM*ZDIM*14]-fA_h[i+XDIM*YDIM*ZDIM*15]-fA_h[i+XDIM*YDIM*ZDIM*16]-fA_h[i+XDIM*YDIM*ZDIM*17]-fA_h[i+XDIM*YDIM*ZDIM*18];
output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl;
// output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<fA_h[i+XDIM*YDIM*ZDIM*1]<<","<<rho<<endl;
}
}
}
if(REFINEMENT == "YES"){
output<<endl<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM-0<<", J="<<YLRDIM-0<<", K="<<ZLRDIM-0<<"\n";
for(dep = 0; dep<ZLRDIM-0; dep++){
for(row = 0; row<YLRDIM-0; row++){
for(col = 0; col<XLRDIM-0; col++){
i = dep*XLRDIM*YLRDIM+row*XLRDIM+col;
rho = fC_h[i];
for(j = 1; j<19; j++)
rho+=fC_h[i+XLRDIM*YLRDIM*ZLRDIM*j];
u = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*1]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*3]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*5]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*6]-
fC_h[i+XLRDIM*YLRDIM*ZLRDIM*7]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*8]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*10]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*12]
+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*15]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*17];
v = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*2]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*4]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*5]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*6]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*7]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*8]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*11]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*13]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*16]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*18];
w = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*9]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*10]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*11]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*12]+fC_h[i+XLRDIM*YLRDIM*ZLRDIM*13]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*14]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*15]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*16]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*17]-fC_h[i+XLRDIM*YLRDIM*ZLRDIM*18];
// u=2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*1]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*3 ]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*5 ]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*8 ] ;
// v=2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*2]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*4 ]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*5 ]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*8 ] ;
// w=2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*9]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*10]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*11]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*12] ;
//
// u=-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*6 ]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*7 ]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*10]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*12]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*15]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*17];
// v=+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*6 ]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*7 ]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*11]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*13]+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*16]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*18];
// w=+2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*13]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*14]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*15]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*16]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*17]-2.f*fC_h[i+XLRDIM*YLRDIM*ZLRDIM*18];
// output<<LRX0+col*LRFACTOR<<", "<<LRY0+row*LRFACTOR<<", "<<LRZ0+dep*LRFACTOR<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl;
output<<LRX0+col*LRFACTOR<<", "<<LRY0+row*LRFACTOR<<", "<<LRZ0+dep*LRFACTOR<<", "<<fC_h[i+XLRDIM*YLRDIM*ZLRDIM*1]<<","<<fC_h[i+XLRDIM*YLRDIM*ZLRDIM*2]<<","<<fC_h[i+XLRDIM*YLRDIM*ZLRDIM*3]<<","<<rho<<endl;
}
}
}
}
output.close();
//for(int t = STARTF-1; t<TMAX; t++){
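// Normalize the recorded forces by the dynamic pressure times the projected obstacle area (pi*r^2).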
for(int t = 0; t<TMAX; t++){
output2<<t<<", "<<FX_h[t]/(0.5f*UMAX*UMAX*OBSTR*OBSTR*3.14158f)<<", "
<<FY_h[t]/(0.5f*UMAX*UMAX*OBSTR*OBSTR*3.14158f)<<", "
<<FZ_h[t]/(0.5f*UMAX*UMAX*OBSTR*OBSTR*3.14158f)<<endl;
}
output2.close();
//cudaFree(image_d);
cudaFree(fA_d);
cudaFree(fB_d);
cudaFree(fC_d);
cudaFree(fD_d);
return(0);
}
|
7f062f52de6682c26f85ee7139a8b32d5dce7452.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Basic CUDA file for testing compiler flags.
*/
__device__ int inner()
{
return -1;
}
__global__ void test()
{
inner();
}
int main()
{
hipLaunchKernelGGL(( test), dim3(1),dim3(1), 0, 0, );
return 0;
}
|
7f062f52de6682c26f85ee7139a8b32d5dce7452.cu
|
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Basic CUDA file for testing compiler flags.
*/
__device__ int inner()
{
return -1;
}
__global__ void test()
{
inner();
}
int main()
{
test<<<1,1>>>();
return 0;
}
|
7db2f2999e0bdc1678719bfa59d713278c1b0aa9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include "math.h"
#define TIMER_CREATE(t) \
hipEvent_t t##_start, t##_end; \
hipEventCreate(&t##_start); \
hipEventCreate(&t##_end);
#define TIMER_START(t) \
hipEventRecord(t##_start); \
hipEventSynchronize(t##_start); \
#define TIMER_END(t) \
hipEventRecord(t##_end); \
hipEventSynchronize(t##_end); \
hipEventElapsedTime(&t, t##_start, t##_end); \
hipEventDestroy(t##_start); \
hipEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
unsigned char *input_gpu;
unsigned char *output_gpu;
/*******************************************************/
/* CUDA Error Function */
/*******************************************************/
inline hipError_t checkCuda(hipError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// GPU kernel and functions
__global__ void kernel(unsigned char *input,
unsigned char *output,
unsigned int height,
unsigned int width){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
if (x > 0 && x < width-1 && y > 0 && y < height-1 ){
int loc = y*width+x;
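// Approximate image gradients with a Sobel-style stencil: "it" is the vertical component,
// "dt" the horizontal component; the output pixel is their magnitude.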
int i1 = ((int)input[loc-width-1]) * -1;
int i2 = ((int)input[loc-width]) * -2;
int i3 = ((int)input[loc-width+1]) * -1;
int i4 = ((int)input[loc+width-1]) * 1;
int i5 = ((int)input[loc+width]) * 2;
int i6 = ((int)input[loc+width+1]) * 1;
int it=0;
it = (i1 + i2 + i3 + i4 + i5 + i6)/6;
int d1 = ((int)input[loc-width-1]) * 1;
int d2 = ((int)input[loc-1]) * 2;
int d3 = ((int)input[loc+width-1]) * 1;
int d4 = ((int)input[loc-width+1]) * -1;
int d5 = ((int)input[loc+1]) * -2;
int d6 = ((int)input[loc+width+1]) * -1;
int dt=0;
dt = (d1 + d2 + d3 + d4 + d5 + d6)/6;
int total=0;
total = (int)(sqrt((float)it*(float)it + (float)dt*(float)dt));
output[loc] = (unsigned char)total;
}
}
void transpose_img(unsigned char *in_mat,
unsigned char *out_mat,
unsigned int height,
unsigned int width){
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(hipMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(hipMemcpy(input_gpu,
in_mat,
height*width*sizeof(char),
hipMemcpyHostToDevice));
checkCuda(hipDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// Kernel Call
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu, output_gpu, height, width);
checkCuda(hipDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(hipMemcpy(out_mat,
output_gpu,
height*width*sizeof(unsigned char),
hipMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(hipFree(output_gpu));
checkCuda(hipFree(input_gpu));
}
|
7db2f2999e0bdc1678719bfa59d713278c1b0aa9.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include "math.h"
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
unsigned char *input_gpu;
unsigned char *output_gpu;
/*******************************************************/
/* CUDA Error Function */
/*******************************************************/
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// GPU kernel and functions
__global__ void kernel(unsigned char *input,
unsigned char *output,
unsigned int height,
unsigned int width){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
if (x > 0 && x < width-1 && y > 0 && y < height-1 ){
int loc = y*width+x;
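// Approximate image gradients with a Sobel-style stencil: "it" is the vertical component,
// "dt" the horizontal component; the output pixel is their magnitude.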
int i1 = ((int)input[loc-width-1]) * -1;
int i2 = ((int)input[loc-width]) * -2;
int i3 = ((int)input[loc-width+1]) * -1;
int i4 = ((int)input[loc+width-1]) * 1;
int i5 = ((int)input[loc+width]) * 2;
int i6 = ((int)input[loc+width+1]) * 1;
int it=0;
it = (i1 + i2 + i3 + i4 + i5 + i6)/6;
int d1 = ((int)input[loc-width-1]) * 1;
int d2 = ((int)input[loc-1]) * 2;
int d3 = ((int)input[loc+width-1]) * 1;
int d4 = ((int)input[loc-width+1]) * -1;
int d5 = ((int)input[loc+1]) * -2;
int d6 = ((int)input[loc+width+1]) * -1;
int dt=0;
dt = (d1 + d2 + d3 + d4 + d5 + d6)/6;
int total=0;
total = (int)(sqrt((float)it*(float)it + (float)dt*(float)dt));
output[loc] = (unsigned char)total;
}
}
void transpose_img(unsigned char *in_mat,
unsigned char *out_mat,
unsigned int height,
unsigned int width){
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu,
in_mat,
height*width*sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// Kernel Call
kernel<<<dimGrid, dimBlock>>>(input_gpu, output_gpu, height, width);
checkCuda(cudaDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(cudaMemcpy(out_mat,
output_gpu,
height*width*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
|
a0ff6121bc22ee5870b1c5eb750499b6928609a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020 Florian Wende ([email protected])
//
// Distributed under the BSD 2-clause Software License
// (See accompanying file LICENSE)
#include <iostream>
#include <memory>
#include <gtest/gtest.h>
#include <field/Field.hpp>
#include <tuple/Tuple.hpp>
using namespace ::fw::dataTypes;
using namespace ::fw::memory;
using TypeX = int;
using TypeY = float;
using TypeZ = short;
using ElementT = Tuple<TypeX, TypeY, TypeZ>;
auto loop_3d = [] (auto& field, auto&& loop_body)
{
for (SizeT z = 0; z < field.Size(2); ++z)
{
for (SizeT y = 0; y < field.Size(1); ++y)
{
for (SizeT x = 0; x < field.Size(0); ++x)
{
loop_body(field[z][y][x]);
}
}
}
};
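// Kernel_1 writes each cell's flattened 1-D index into the whole element; Kernel_2 writes
// distinct values into the x, y, z components so each data layout can be checked independently.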
template <typename Container>
__global__
void Kernel_1(Container field)
{
const SizeT x = blockIdx.x * blockDim.x + threadIdx.x;
const SizeT y = blockIdx.y * blockDim.y + threadIdx.y;
const SizeT z = blockIdx.z * blockDim.z + threadIdx.z;
const SizeT index = (z * field.Size(1) + y) * field.Size(0) + x;
if (x < field.Size(0) && y < field.Size(1) && z < field.Size(2))
{
field[z][y][x] = index;
}
}
template <typename Container>
__global__
void Kernel_2(Container field)
{
const SizeT x = blockIdx.x * blockDim.x + threadIdx.x;
const SizeT y = blockIdx.y * blockDim.y + threadIdx.y;
const SizeT z = blockIdx.z * blockDim.z + threadIdx.z;
const SizeT index = (z * field.Size(1) + y) * field.Size(0) + x;
if (x < field.Size(0) && y < field.Size(1) && z < field.Size(2))
{
field[z][y][x].x = 3 * index + 0;
field[z][y][x].y = 3 * index + 1;
field[z][y][x].z = 3 * index + 2;
}
}
TEST(Field, aos_3d_assign_1dindex_as_value)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::AoS> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != 0) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
hipLaunchKernelGGL(( Kernel_1), dim3(grid), dim3(block), 0, 0, field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item != index++) output_is_correct = false; });
return output_is_correct;
} ());
}
TEST(Field, aos_3d_assign_1dindex_as_value_componentwise)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::AoS> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != 0) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
hipLaunchKernelGGL(( Kernel_2), dim3(grid), dim3(block), 0, 0, field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != (3 * index) || item.y != (3 * index + 1) || item.z != (3 * index + 2)) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, soa_3d_assign_1dindex_as_value)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::SoA> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
hipLaunchKernelGGL(( Kernel_1), dim3(grid), dim3(block), 0, 0, field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != index || item.y != index || item.z != index) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, soa_3d_assign_1dindex_as_value_componentwise)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::SoA> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
hipLaunchKernelGGL(( Kernel_2), dim3(grid), dim3(block), 0, 0, field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != (3 * index) || item.y != (3 * index + 1) || item.z != (3 * index + 2)) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, soai_3d_assign_1dindex_as_value)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::SoAi> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
hipLaunchKernelGGL(( Kernel_1), dim3(grid), dim3(block), 0, 0, field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != index || item.y != index || item.z != index) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, soai_3d_assign_1dindex_as_value_componentwise)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::SoAi> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
hipLaunchKernelGGL(( Kernel_2), dim3(grid), dim3(block), 0, 0, field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != (3 * index) || item.y != (3 * index + 1) || item.z != (3 * index + 2)) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, aosoa_3d_assign_1dindex_as_value)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::AoSoA> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
hipLaunchKernelGGL(( Kernel_1), dim3(grid), dim3(block), 0, 0, field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != index || item.y != index || item.z != index) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, aosoa_3d_assign_1dindex_as_value_componentwise)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::AoSoA> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
hipLaunchKernelGGL(( Kernel_2), dim3(grid), dim3(block), 0, 0, field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != (3 * index) || item.y != (3 * index + 1) || item.z != (3 * index + 2)) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
int main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
a0ff6121bc22ee5870b1c5eb750499b6928609a1.cu
|
// Copyright (c) 2020 Florian Wende ([email protected])
//
// Distributed under the BSD 2-clause Software License
// (See accompanying file LICENSE)
#include <iostream>
#include <memory>
#include <gtest/gtest.h>
#include <field/Field.hpp>
#include <tuple/Tuple.hpp>
using namespace ::fw::dataTypes;
using namespace ::fw::memory;
using TypeX = int;
using TypeY = float;
using TypeZ = short;
using ElementT = Tuple<TypeX, TypeY, TypeZ>;
auto loop_3d = [] (auto& field, auto&& loop_body)
{
for (SizeT z = 0; z < field.Size(2); ++z)
{
for (SizeT y = 0; y < field.Size(1); ++y)
{
for (SizeT x = 0; x < field.Size(0); ++x)
{
loop_body(field[z][y][x]);
}
}
}
};
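// Kernel_1 writes each cell's flattened 1-D index into the whole element; Kernel_2 writes
// distinct values into the x, y, z components so each data layout can be checked independently.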
template <typename Container>
__global__
void Kernel_1(Container field)
{
const SizeT x = blockIdx.x * blockDim.x + threadIdx.x;
const SizeT y = blockIdx.y * blockDim.y + threadIdx.y;
const SizeT z = blockIdx.z * blockDim.z + threadIdx.z;
const SizeT index = (z * field.Size(1) + y) * field.Size(0) + x;
if (x < field.Size(0) && y < field.Size(1) && z < field.Size(2))
{
field[z][y][x] = index;
}
}
template <typename Container>
__global__
void Kernel_2(Container field)
{
const SizeT x = blockIdx.x * blockDim.x + threadIdx.x;
const SizeT y = blockIdx.y * blockDim.y + threadIdx.y;
const SizeT z = blockIdx.z * blockDim.z + threadIdx.z;
const SizeT index = (z * field.Size(1) + y) * field.Size(0) + x;
if (x < field.Size(0) && y < field.Size(1) && z < field.Size(2))
{
field[z][y][x].x = 3 * index + 0;
field[z][y][x].y = 3 * index + 1;
field[z][y][x].z = 3 * index + 2;
}
}
TEST(Field, aos_3d_assign_1dindex_as_value)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::AoS> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != 0) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
Kernel_1<<<grid, block>>>(field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item != index++) output_is_correct = false; });
return output_is_correct;
} ());
}
TEST(Field, aos_3d_assign_1dindex_as_value_componentwise)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::AoS> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != 0) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
Kernel_2<<<grid, block>>>(field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != (3 * index) || item.y != (3 * index + 1) || item.z != (3 * index + 2)) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, soa_3d_assign_1dindex_as_value)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::SoA> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
Kernel_1<<<grid, block>>>(field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != index || item.y != index || item.z != index) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, soa_3d_assign_1dindex_as_value_componentwise)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::SoA> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
Kernel_2<<<grid, block>>>(field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != (3 * index) || item.y != (3 * index + 1) || item.z != (3 * index + 2)) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, soai_3d_assign_1dindex_as_value)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::SoAi> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
Kernel_1<<<grid, block>>>(field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != index || item.y != index || item.z != index) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, soai_3d_assign_1dindex_as_value_componentwise)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::SoAi> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
Kernel_2<<<grid, block>>>(field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != (3 * index) || item.y != (3 * index + 1) || item.z != (3 * index + 2)) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, aosoa_3d_assign_1dindex_as_value)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::AoSoA> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
Kernel_1<<<grid, block>>>(field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != index || item.y != index || item.z != index) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
TEST(Field, aosoa_3d_assign_1dindex_as_value_componentwise)
{
constexpr SizeT nx = 23;
constexpr SizeT ny = 7;
constexpr SizeT nz = 13;
Field<ElementT, 3, DataLayout::AoSoA> field({nx, ny, nz}, true);
EXPECT_EQ(true, [&field]() {
bool all_zero = true;
loop_3d(field, [&all_zero](auto&& item) { if (item != ElementT(0)) all_zero = false; });
return all_zero;
} ());
dim3 block{128, 1, 1};
dim3 grid{(nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y, (nz + (block.z - 1)) / block.z};
Kernel_2<<<grid, block>>>(field.DeviceData());
field.CopyDeviceToHost();
EXPECT_EQ(true, [&field]() {
bool output_is_correct = true;
SizeT index = 0;
loop_3d(field, [&output_is_correct, &index](auto&& item) { if (item.x != (3 * index) || item.y != (3 * index + 1) || item.z != (3 * index + 2)) output_is_correct = false; ++index; });
return output_is_correct;
} ());
}
int main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
f24f51e1997a86daaf6274fe8e9632b840912d1e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <cstdlib>
#include <clocale>
#include <iostream>
using namespace std;
hipError_t cudaMul(int* c, const int* a, const int* b, unsigned int size);
void multi(int row1, int col1, int col2, int* a, int* b, int* c);
const int BLOCK_SIZE = 16;
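// Tiled matrix multiplication: each block stages BLOCK_SIZE x BLOCK_SIZE tiles of a and b in
// shared memory, and each thread accumulates one element of the result c.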
__global__ void matMult(int* a, int* b, int* c, int n) {
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
int aBegin = n * BLOCK_SIZE * by;
int aEnd = aBegin + n - 1;
int bBegin = BLOCK_SIZE * bx;
int aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * n;
int sum = 0;
for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
{
__shared__ int as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int bs[BLOCK_SIZE][BLOCK_SIZE];
as [ty][tx] = a[ia + n * ty + tx];
bs [ty][tx] = b[ib + n * ty + tx];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; k++)
sum += as[ty][k] * bs[k][tx];
__syncthreads();
}
c[n * BLOCK_SIZE * by + BLOCK_SIZE * bx + n * ty + tx] = sum;
}
void multi(int row1, int col1, int col2, int* a, int* b, int* c) {
int size = row1 * col2;
for (int i = 0; i < row1; i++) {
for (int j = 0; j < col2; j++) {
int sum = 0;
for (int k = 0; k < col1; k++)
sum = sum + a[i * col1 + k] * b[k * col2 + j];
c[i * col2 + j] = sum;
}
}
}
int main()
{
setlocale(LC_ALL, "Rus");
int N = 2000;
const int arraySize1 = N * N; const int arraySize2 = N * N; const int arraySize3 = N * N;
int* a = new int[arraySize1]; int* b = new int[arraySize2]; int* c = new int[arraySize3]; int* d = new int[arraySize3];
for (int i = 0; i < arraySize1; ++i) { a[i] = rand() % 20; }
for (int i = 0; i < arraySize2; ++i) { b[i] = rand() % 20; }
// a and b are both N x N matrices, so their dimensions always conform for multiplication.
clock_t begin = clock();
multi(N, N, N, a, b, c);
double t = double(clock() - begin) * 1000 / CLOCKS_PER_SEC;
cout << " CPU = " << t;
hipError_t cudaStatus = cudaMul(d, a, b, N); // GPU result goes into d, leaving the CPU result in c
if (cudaStatus != hipSuccess) {
fprintf(stderr, "\n");
return 1;
}
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "\n");
return 1;
}
delete[] a;
delete[] b;
delete[] c;
delete[] d;
return 0;
}
hipError_t cudaMul(int* c, const int* a, const int* b, unsigned int N)
{
const int size = N * N;
int* dev_a = 0; int* dev_b = 0; int* dev_c = 0;
clock_t beginD;
double deviceTime;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "");
goto Error;
}
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "");
goto Error;
}
beginD = clock();
hipLaunchKernelGGL(( matMult), dim3(N / BLOCK_SIZE, N / BLOCK_SIZE), dim3(BLOCK_SIZE, BLOCK_SIZE), 0, 0, dev_a, dev_b, dev_c, N);
hipDeviceSynchronize();
double deviceTime = double(clock() - beginD) * 1000 / CLOCKS_PER_SEC;
printf("", deviceTime);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
|
f24f51e1997a86daaf6274fe8e9632b840912d1e.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <cstdlib>
#include <clocale>
#include <iostream>
using namespace std;
cudaError_t cudaMul(int* c, const int* a, const int* b, unsigned int size);
void multi(int row1, int col1, int col2, int* a, int* b, int* c);
const int BLOCK_SIZE = 16;
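// matMult: tiled shared-memory multiply of two square n x n matrices.
// Each thread block computes one BLOCK_SIZE x BLOCK_SIZE tile of c by
// staging matching tiles of a and b in shared memory and accumulating
// partial dot products across the k dimension.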
__global__ void matMult(int* a, int* b, int* c, int n) {
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
int aBegin = n * BLOCK_SIZE * by;
int aEnd = aBegin + n - 1;
int bBegin = BLOCK_SIZE * bx;
int aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * n;
int sum = 0;
for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
{
__shared__ int as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int bs[BLOCK_SIZE][BLOCK_SIZE];
as [ty][tx] = a[ia + n * ty + tx];
bs [ty][tx] = b[ib + n * ty + tx];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; k++)
sum += as[ty][k] * bs[k][tx];
__syncthreads();
}
c[n * BLOCK_SIZE * by + BLOCK_SIZE * bx + n * ty + tx] = sum;
}
void multi(int row1, int col1, int col2, int* a, int* b, int* c) {
int size = row1 * col2;
for (int i = 0; i < row1; i++) {
for (int j = 0; j < col2; j++) {
int sum = 0;
for (int k = 0; k < col1; k++)
sum = sum + a[i * col1 + k] * b[k * col2 + j];
c[i * col2 + j] = sum;
}
}
}
int main()
{
setlocale(LC_ALL, "Rus");
int N = 2000;
const int arraySize1 = N * N; const int arraySize2 = N * N; const int arraySize3 = N * N;
int* a = new int[arraySize1]; int* b = new int[arraySize2]; int* c = new int[arraySize3]; int* d = new int[arraySize3];
for (int i = 0; i < arraySize1; ++i) { a[i] = rand() % 20; }
for (int i = 0; i < arraySize2; ++i) { b[i] = rand() % 20; }
// All matrices here are N x N, so the dimensions always conform for multiplication.
clock_t begin = clock();
multi(N, N, N, a, b, c);
double t = double(clock() - begin) * 1000 / CLOCKS_PER_SEC;
cout << "Время вычислений на CPU = " << t;
cudaError_t cudaStatus = cudaMul(d, a, b, N);  // GPU result goes into d
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Ошибка\n");
return 1;
}
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Ошибка\n");
return 1;
}
delete[] a;
delete[] b;
delete[] c;
delete[] d;
return 0;
}
cudaError_t cudaMul(int* c, const int* a, const int* b, unsigned int N)
{
const int size = N * N;
int* dev_a = 0; int* dev_b = 0; int* dev_c = 0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Ошибка");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Ошибка");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Ошибка");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Ошибка");
goto Error;
}
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Ошибка");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Ошибка");
goto Error;
}
clock_t beginD = clock();
matMult<<<dim3(N / BLOCK_SIZE, N / BLOCK_SIZE), dim3(BLOCK_SIZE, BLOCK_SIZE)>>>(dev_a, dev_b, dev_c, N);
cudaThreadSynchronize();
double deviceTime = double(clock() - beginD) * 1000 / CLOCKS_PER_SEC;
printf("Ошибка", deviceTime);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Ошибка", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Ошибка", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Ошибка");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
4582c1269beb536b6b11f1d146b2923127414a52.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
namespace math {
template <>
void gemm<platform::GPUPlace, float>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
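// Swapping the operand order (computing B^T * A^T in column-major terms)
// yields the row-major product A * B directly, without an explicit transpose.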
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasSgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void gemm<platform::GPUPlace, double>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(transA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(transB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasDgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void gemm<platform::GPUPlace, float>(const platform::DeviceContext& context,
const bool transA, const bool transB,
const int M, const int N, const int K,
const float alpha, const float* A,
const int lda, const float* B,
const int ldb, const float beta, float* C,
const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
hipblasOperation_t cuTransA = transA == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB = transB == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasSgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc));
}
template <>
void gemm<platform::GPUPlace, double>(const platform::DeviceContext& context,
const bool transA, const bool transB,
const int M, const int N, const int K,
const double alpha, const double* A,
const int lda, const double* B,
const int ldb, const double beta,
double* C, const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
hipblasOperation_t cuTransA = transA == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB = transB == false ? HIPBLAS_OP_N : HIPBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::hipblasDgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc));
}
template <>
void matmul<platform::GPUPlace, float>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, float>(
context, transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>());
}
template <>
void matmul<platform::GPUPlace, double>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, double alpha,
framework::Tensor* matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, double>(
context, transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
} // namespace math
} // namespace operators
} // namespace paddle
|
4582c1269beb536b6b11f1d146b2923127414a52.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
namespace math {
template <>
void gemm<platform::GPUPlace, float>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
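// Swapping the operand order (computing B^T * A^T in column-major terms)
// yields the row-major product A * B directly, without an explicit transpose.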
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasSgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void gemm<platform::GPUPlace, double>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasDgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void gemm<platform::GPUPlace, float>(const platform::DeviceContext& context,
const bool transA, const bool transB,
const int M, const int N, const int K,
const float alpha, const float* A,
const int lda, const float* B,
const int ldb, const float beta, float* C,
const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
cublasOperation_t cuTransA = transA == false ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB = transB == false ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasSgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc));
}
template <>
void gemm<platform::GPUPlace, double>(const platform::DeviceContext& context,
const bool transA, const bool transB,
const int M, const int N, const int K,
const double alpha, const double* A,
const int lda, const double* B,
const int ldb, const double beta,
double* C, const int ldc) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
cublasOperation_t cuTransA = transA == false ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB = transB == false ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasDgemm(
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc));
}
template <>
void matmul<platform::GPUPlace, float>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, float>(
context, transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>());
}
template <>
void matmul<platform::GPUPlace, double>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, double alpha,
framework::Tensor* matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, double>(
context, transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
} // namespace math
} // namespace operators
} // namespace paddle
|
361626947570e1556fdfdc9581f8d15c2b7eaf8e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
using namespace std;
int print_cuda_version()
{
int count = 0;
if (hipSuccess != hipGetDeviceCount(&count)) {
return -1;
}
if (count == 0) {
return -1;
}
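// Report the compute capability (major.minor) of every visible device.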
for (int device = 0; device < count; ++device) {
hipDeviceProp_t prop;
if (hipSuccess == hipGetDeviceProperties(&prop, device)) {
std::cout << prop.major << "." << prop.minor << std::endl;
}
}
return 0;
}
int main(int argc, char ** argv)
{
return print_cuda_version();
}
|
361626947570e1556fdfdc9581f8d15c2b7eaf8e.cu
|
#include <iostream>
using namespace std;
int print_cuda_version()
{
int count = 0;
if (cudaSuccess != cudaGetDeviceCount(&count)) {
return -1;
}
if (count == 0) {
return -1;
}
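// Report the compute capability (major.minor) of every visible device.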
for (int device = 0; device < count; ++device) {
cudaDeviceProp prop;
if (cudaSuccess == cudaGetDeviceProperties(&prop, device)) {
std::cout << prop.major << "." << prop.minor << std::endl;
}
}
return 0;
}
int main(int argc, char ** argv)
{
return print_cuda_version();
}
|
afcdd46985be558d430731fbd3625e5b36cf8eed.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void slice_kernel(
float *out_arr,
const float *in_arr,
const int64_t *o_shape,
const int64_t *i_shape,
const int64_t *begin_pos,
size_t ndim,
size_t size
) {
size_t o_index = blockIdx.x * blockDim.x + threadIdx.x;
if (o_index >= size) return;
size_t tmp_index = o_index;
size_t i_index = 0;
int64_t i_mat = 1;
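// Decompose the flat output index into per-dimension coordinates (innermost
// dimension first), shift each coordinate by begin_pos, and rebuild the
// corresponding flat index into the input array.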
for (int i = ndim - 1; i >= 0; --i) {
int64_t offset = begin_pos[i] + tmp_index % o_shape[i];
tmp_index /= o_shape[i];
i_index += offset * i_mat;
i_mat *= i_shape[i];
}
out_arr[o_index] = in_arr[i_index];
}
int DLGpuSlice(
const DLArrayHandle in_arr,
DLArrayHandle out_arr,
int64_t *begin_pos,
DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL
) {
assert(in_arr->ndim == out_arr->ndim);
size_t ndim = in_arr->ndim;
size_t o_size = 1;
for (int i = 0; i < ndim; ++i) {
assert(begin_pos[i] >= 0);
assert(begin_pos[i] + out_arr->shape[i] <= in_arr->shape[i]);
o_size *= out_arr ->shape[i];
}
const float *i_data = (const float *)in_arr->data;
float *o_data = (float *)out_arr->data;
size_t alloc_size = ndim * sizeof(int64_t);
void *pos = MemoryPool -> DLMemoryMalloc(alloc_size);
void *i_shape = MemoryPool -> DLMemoryMalloc(alloc_size);
void *o_shape = MemoryPool -> DLMemoryMalloc(alloc_size);
CUDA_CALL(hipMemcpy(pos, (void *)begin_pos,
alloc_size, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(i_shape, (void *)in_arr->shape,
alloc_size, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(o_shape, (void *)out_arr->shape,
alloc_size, hipMemcpyHostToDevice));
dim3 blocks;
dim3 threads;
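// Use a single block when the output fits within 1024 threads; otherwise
// launch enough 1024-thread blocks to cover every output element.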
if (o_size <= 1024) {
threads.x = o_size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (o_size + 1023) / 1024;
}
if (stream_handle)
hipLaunchKernelGGL(( slice_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle,
o_data, i_data, (const int64_t*)o_shape, (const int64_t*)i_shape, (const int64_t*)pos, ndim, o_size);
else
hipLaunchKernelGGL(( slice_kernel), dim3(blocks), dim3(threads), 0, 0,
o_data, i_data, (const int64_t*)o_shape, (const int64_t*)i_shape, (const int64_t*)pos, ndim, o_size);
MemoryPool -> DLMemoryFree(pos, alloc_size);
MemoryPool -> DLMemoryFree(i_shape, alloc_size);
MemoryPool -> DLMemoryFree(o_shape, alloc_size);
if(p != NULL){
int size_input = 1, size_output = 1;
for(int i = 0; i < in_arr -> ndim; i++)
size_input *= in_arr -> shape[i];
for(int i = 0; i < out_arr -> ndim; i++)
size_output *= out_arr -> shape[i];
p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
}
return 0;
}
__global__ void slice_gradient_kernel(
float *out_arr,
const float *in_arr,
const int64_t *o_shape,
const int64_t *i_shape,
const int64_t *begin_pos,
size_t ndim,
size_t size
) {
size_t o_index = blockIdx.x * blockDim.x + threadIdx.x;
if (o_index >= size) return;
out_arr[o_index] = 0;
size_t tmp_index = o_index;
size_t i_index = 0;
int64_t i_mat = 1;
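// Walk the output coordinates from the innermost dimension outwards; any
// coordinate that falls outside [begin_pos, begin_pos + i_shape) lies outside
// the sliced region, so the zero written above is kept and the thread exits.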
for (int i = ndim - 1; i >= 0; --i) {
int64_t offset = tmp_index % o_shape[i];
if (offset < begin_pos[i] || offset >= begin_pos[i] + i_shape[i]) return;
tmp_index /= o_shape[i];
i_index += (offset - begin_pos[i]) * i_mat;
i_mat *= i_shape[i];
}
out_arr[o_index] = in_arr[i_index];
}
int DLGpuSliceGradient(
const DLArrayHandle in_arr,
DLArrayHandle out_arr,
int64_t *begin_pos,
DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL
) {
assert(in_arr->ndim == out_arr->ndim);
size_t ndim = in_arr->ndim;
size_t o_size = 1;
for (int i = 0; i < ndim; ++i) {
assert(begin_pos[i] >= 0);
assert(begin_pos[i] + in_arr->shape[i] <= out_arr->shape[i]);
o_size *= out_arr ->shape[i];
}
const float *i_data = (const float *)in_arr->data;
float *o_data = (float *)out_arr->data;
size_t alloc_size = ndim * sizeof(int64_t);
void *pos = MemoryPool -> DLMemoryMalloc(alloc_size);
void *i_shape = MemoryPool -> DLMemoryMalloc(alloc_size);
void *o_shape = MemoryPool -> DLMemoryMalloc(alloc_size);
CUDA_CALL(hipMemcpy(pos, (void *)begin_pos,
alloc_size, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(i_shape, (void *)in_arr->shape,
alloc_size, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(o_shape, (void *)out_arr->shape,
alloc_size, hipMemcpyHostToDevice));
dim3 blocks;
dim3 threads;
if (o_size <= 1024) {
threads.x = o_size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (o_size + 1023) / 1024;
}
if (stream_handle)
hipLaunchKernelGGL(( slice_gradient_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle,
o_data, i_data, (const int64_t*)o_shape, (const int64_t*)i_shape, (const int64_t*)pos, ndim, o_size);
else
hipLaunchKernelGGL(( slice_gradient_kernel), dim3(blocks), dim3(threads), 0, 0,
o_data, i_data, (const int64_t*)o_shape, (const int64_t*)i_shape, (const int64_t*)pos, ndim, o_size);
MemoryPool -> DLMemoryFree(pos, alloc_size);
MemoryPool -> DLMemoryFree(i_shape, alloc_size);
MemoryPool -> DLMemoryFree(o_shape, alloc_size);
if(p != NULL){
int size_input = 1, size_output = 1;
for(int i = 0; i < in_arr -> ndim; i++)
size_input *= in_arr -> shape[i];
for(int i = 0; i < out_arr -> ndim; i++)
size_output *= out_arr -> shape[i];
p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
}
return 0;
}
|
afcdd46985be558d430731fbd3625e5b36cf8eed.cu
|
#include "gpu_runtime.h"
__global__ void slice_kernel(
float *out_arr,
const float *in_arr,
const int64_t *o_shape,
const int64_t *i_shape,
const int64_t *begin_pos,
size_t ndim,
size_t size
) {
size_t o_index = blockIdx.x * blockDim.x + threadIdx.x;
if (o_index >= size) return;
size_t tmp_index = o_index;
size_t i_index = 0;
int64_t i_mat = 1;
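// Decompose the flat output index into per-dimension coordinates (innermost
// dimension first), shift each coordinate by begin_pos, and rebuild the
// corresponding flat index into the input array.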
for (int i = ndim - 1; i >= 0; --i) {
int64_t offset = begin_pos[i] + tmp_index % o_shape[i];
tmp_index /= o_shape[i];
i_index += offset * i_mat;
i_mat *= i_shape[i];
}
out_arr[o_index] = in_arr[i_index];
}
int DLGpuSlice(
const DLArrayHandle in_arr,
DLArrayHandle out_arr,
int64_t *begin_pos,
DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL
) {
assert(in_arr->ndim == out_arr->ndim);
size_t ndim = in_arr->ndim;
size_t o_size = 1;
for (int i = 0; i < ndim; ++i) {
assert(begin_pos[i] >= 0);
assert(begin_pos[i] + out_arr->shape[i] <= in_arr->shape[i]);
o_size *= out_arr ->shape[i];
}
const float *i_data = (const float *)in_arr->data;
float *o_data = (float *)out_arr->data;
size_t alloc_size = ndim * sizeof(int64_t);
void *pos = MemoryPool -> DLMemoryMalloc(alloc_size);
void *i_shape = MemoryPool -> DLMemoryMalloc(alloc_size);
void *o_shape = MemoryPool -> DLMemoryMalloc(alloc_size);
CUDA_CALL(cudaMemcpy(pos, (void *)begin_pos,
alloc_size, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(i_shape, (void *)in_arr->shape,
alloc_size, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(o_shape, (void *)out_arr->shape,
alloc_size, cudaMemcpyHostToDevice));
dim3 blocks;
dim3 threads;
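// Use a single block when the output fits within 1024 threads; otherwise
// launch enough 1024-thread blocks to cover every output element.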
if (o_size <= 1024) {
threads.x = o_size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (o_size + 1023) / 1024;
}
if (stream_handle)
slice_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>
(o_data, i_data, (const int64_t*)o_shape, (const int64_t*)i_shape, (const int64_t*)pos, ndim, o_size);
else
slice_kernel<<<blocks, threads>>>
(o_data, i_data, (const int64_t*)o_shape, (const int64_t*)i_shape, (const int64_t*)pos, ndim, o_size);
MemoryPool -> DLMemoryFree(pos, alloc_size);
MemoryPool -> DLMemoryFree(i_shape, alloc_size);
MemoryPool -> DLMemoryFree(o_shape, alloc_size);
if(p != NULL){
int size_input = 1, size_output = 1;
for(int i = 0; i < in_arr -> ndim; i++)
size_input *= in_arr -> shape[i];
for(int i = 0; i < out_arr -> ndim; i++)
size_output *= out_arr -> shape[i];
p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
}
return 0;
}
__global__ void slice_gradient_kernel(
float *out_arr,
const float *in_arr,
const int64_t *o_shape,
const int64_t *i_shape,
const int64_t *begin_pos,
size_t ndim,
size_t size
) {
size_t o_index = blockIdx.x * blockDim.x + threadIdx.x;
if (o_index >= size) return;
out_arr[o_index] = 0;
size_t tmp_index = o_index;
size_t i_index = 0;
int64_t i_mat = 1;
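// Walk the output coordinates from the innermost dimension outwards; any
// coordinate that falls outside [begin_pos, begin_pos + i_shape) lies outside
// the sliced region, so the zero written above is kept and the thread exits.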
for (int i = ndim - 1; i >= 0; --i) {
int64_t offset = tmp_index % o_shape[i];
if (offset < begin_pos[i] || offset >= begin_pos[i] + i_shape[i]) return;
tmp_index /= o_shape[i];
i_index += (offset - begin_pos[i]) * i_mat;
i_mat *= i_shape[i];
}
out_arr[o_index] = in_arr[i_index];
}
int DLGpuSliceGradient(
const DLArrayHandle in_arr,
DLArrayHandle out_arr,
int64_t *begin_pos,
DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL
) {
assert(in_arr->ndim == out_arr->ndim);
size_t ndim = in_arr->ndim;
size_t o_size = 1;
for (int i = 0; i < ndim; ++i) {
assert(begin_pos[i] >= 0);
assert(begin_pos[i] + in_arr->shape[i] <= out_arr->shape[i]);
o_size *= out_arr ->shape[i];
}
const float *i_data = (const float *)in_arr->data;
float *o_data = (float *)out_arr->data;
size_t alloc_size = ndim * sizeof(int64_t);
void *pos = MemoryPool -> DLMemoryMalloc(alloc_size);
void *i_shape = MemoryPool -> DLMemoryMalloc(alloc_size);
void *o_shape = MemoryPool -> DLMemoryMalloc(alloc_size);
CUDA_CALL(cudaMemcpy(pos, (void *)begin_pos,
alloc_size, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(i_shape, (void *)in_arr->shape,
alloc_size, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(o_shape, (void *)out_arr->shape,
alloc_size, cudaMemcpyHostToDevice));
dim3 blocks;
dim3 threads;
if (o_size <= 1024) {
threads.x = o_size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (o_size + 1023) / 1024;
}
if (stream_handle)
slice_gradient_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>
(o_data, i_data, (const int64_t*)o_shape, (const int64_t*)i_shape, (const int64_t*)pos, ndim, o_size);
else
slice_gradient_kernel<<<blocks, threads>>>
(o_data, i_data, (const int64_t*)o_shape, (const int64_t*)i_shape, (const int64_t*)pos, ndim, o_size);
MemoryPool -> DLMemoryFree(pos, alloc_size);
MemoryPool -> DLMemoryFree(i_shape, alloc_size);
MemoryPool -> DLMemoryFree(o_shape, alloc_size);
if(p != NULL){
int size_input = 1, size_output = 1;
for(int i = 0; i < in_arr -> ndim; i++)
size_input *= in_arr -> shape[i];
for(int i = 0; i < out_arr -> ndim; i++)
size_output *= out_arr -> shape[i];
p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
}
return 0;
}
|
1b1fbc33bc7000f5a89e547d5690ebe2fc8d9baa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* FLAME GPU v 1.2.0 for CUDA 6
* Copyright 2014 University of Sheffield.
* Author: Dr Paul Richmond
* Contact: [email protected] (http://www.paulrichmond.staff.shef.ac.uk)
*
* University of Sheffield retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* University of Sheffield is strictly prohibited.
*
* For terms of licence agreement please attached licence or view licence
* on www.flamegpu.com website.
*
*/
// includes, project
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
#include <cutil_math.h>
#include <GL/glew.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>
#include <header.h>
#include <visualisation.h>
// bo variables
GLuint sphereVerts;
GLuint sphereNormals;
//Simulation output buffers/textures
GLuint Boid_default_tbo;
GLuint Boid_default_displacementTex;
// mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_y = 0.0;
float translate_z = -VIEW_DISTANCE;
// vertex Shader
GLuint vertexShader;
GLuint fragmentShader;
GLuint shaderProgram;
GLuint vs_displacementMap;
GLuint vs_mapIndex;
//timer
GLuint timer;
const int average = 50;
int frame_count;
#ifdef SIMULATION_DELAY
//delay
int delay_count = 0;
#endif
// prototypes
CUTBoolean initGL();
void initShader();
void createVBO( GLuint* vbo, GLuint size);
void deleteVBO( GLuint* vbo);
void createTBO( GLuint* tbo, GLuint* tex, GLuint size);
void deleteTBO( GLuint* tbo);
void setVertexBufferData();
void display();
void keyboard( unsigned char key, int x, int y);
void mouse(int button, int state, int x, int y);
void motion(int x, int y);
void runCuda();
void checkGLError();
const char vertexShaderSource[] =
{
"#extension GL_EXT_gpu_shader4 : enable \n"
"uniform samplerBuffer displacementMap; \n"
"attribute in float mapIndex; \n"
"varying vec3 normal, lightDir; \n"
"varying vec4 colour; \n"
"void main() \n"
"{ \n"
" vec4 position = gl_Vertex; \n"
" vec4 lookup = texelFetchBuffer(displacementMap, (int)mapIndex); \n"
" if (lookup.w > 6.5) \n"
" colour = vec4(1.0, 1.0, 1.0, 0.0); \n"
" else if (lookup.w > 5.5) \n"
" colour = vec4(1.0, 0.0, 1.0, 0.0); \n"
" else if (lookup.w > 4.5) \n"
" colour = vec4(0.0, 1.0, 1.0, 0.0); \n"
" else if (lookup.w > 3.5) \n"
" colour = vec4(1.0, 1.0, 0.0, 0.0); \n"
" else if (lookup.w > 2.5) \n"
" colour = vec4(0.0, 0.0, 1.0, 0.0); \n"
" else if (lookup.w > 1.5) \n"
" colour = vec4(0.0, 1.0, 0.0, 0.0); \n"
" else if (lookup.w > 0.5) \n"
" colour = vec4(1.0, 0.0, 0.0, 0.0); \n"
" else \n"
" colour = vec4(0.0, 0.0, 0.0, 0.0); \n"
" \n"
" lookup.w = 1.0; \n"
" position += lookup; \n"
" gl_Position = gl_ModelViewProjectionMatrix * position; \n"
" \n"
" vec3 mvVertex = vec3(gl_ModelViewMatrix * position); \n"
" lightDir = vec3(gl_LightSource[0].position.xyz - mvVertex); \n"
" normal = gl_NormalMatrix * gl_Normal; \n"
"} \n"
};
const char fragmentShaderSource[] =
{
"varying vec3 normal, lightDir; \n"
"varying vec4 colour; \n"
"void main (void) \n"
"{ \n"
" // Defining The Material Colors \n"
" vec4 AmbientColor = vec4(0.25, 0.0, 0.0, 1.0); \n"
" vec4 DiffuseColor = colour; \n"
" \n"
" // Scaling The Input Vector To Length 1 \n"
" vec3 n_normal = normalize(normal); \n"
" vec3 n_lightDir = normalize(lightDir); \n"
" \n"
" // Calculating The Diffuse Term And Clamping It To [0;1] \n"
" float DiffuseTerm = clamp(dot(n_normal, n_lightDir), 0.0, 1.0);\n"
" \n"
" // Calculating The Final Color \n"
" gl_FragColor = AmbientColor + DiffuseColor * DiffuseTerm; \n"
" \n"
"} \n"
};
//GPU Kernels
__global__ void output_Boid_agent_to_VBO(xmachine_memory_Boid_list* agents, float4* vbo, float3 centralise){
//global thread index
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
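// Re-centre the agent position about the midpoint of the simulation bounds
// (passed in as 'centralise') and scale it to roughly normalised device
// coordinates for rendering.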
vbo[index].x = 0.0;
vbo[index].y = 0.0;
vbo[index].z = 0.0;
vbo[index].x = (agents->x[index] - centralise.x) / centralise.x;
vbo[index].y = (agents->y[index] - centralise.y) / centralise.y;
vbo[index].z = 0.0;
vbo[index].w = 1.0;
if (index == 0)
printf("%f ", agents->x[index]);
}
void initVisualisation()
{
//set the CUDA GL device: Will cause an error without this since CUDA 3.0
hipGLSetGLDevice(0);
// Create GL context
int argc = 1;
char *argv[] = {"GLUT application", NULL};
glutInit( &argc, argv);
glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize( WINDOW_WIDTH, WINDOW_HEIGHT);
glutCreateWindow( "FLAME GPU Visualiser");
// initialize GL
if( CUTFalse == initGL()) {
return;
}
initShader();
// register callbacks
glutDisplayFunc( display);
glutKeyboardFunc( keyboard);
glutMouseFunc( mouse);
glutMotionFunc( motion);
// create VBO's
createVBO( &sphereVerts, SPHERE_SLICES* (SPHERE_STACKS+1) * sizeof(float3));
createVBO( &sphereNormals, SPHERE_SLICES* (SPHERE_STACKS+1) * sizeof (float3));
setVertexBufferData();
// create TBO
createTBO( &Boid_default_tbo, &Boid_default_displacementTex, xmachine_memory_Boid_MAX * sizeof( float4));
//set shader uniforms
glUseProgram(shaderProgram);
CUT_SAFE_CALL( cutCreateTimer( &timer));
}
void runVisualisation(){
// start rendering mainloop
glutMainLoop();
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
void runCuda()
{
#ifdef SIMULATION_DELAY
delay_count++;
if (delay_count == SIMULATION_DELAY){
delay_count = 0;
singleIteration();
}
#else
singleIteration();
#endif
//kernals sizes
int threads_per_tile = 256;
int tile_size;
dim3 grid;
dim3 threads;
float3 centralise;
//pointer
float4 *dptr;
if (get_agent_Boid_default_count() > 0)
{
// map OpenGL buffer object for writing from CUDA
CUDA_SAFE_CALL(hipGLMapBufferObject__( (void**)&dptr, Boid_default_tbo));
//cuda block size
tile_size = (int) ceil((float)get_agent_Boid_default_count()/threads_per_tile);
grid = dim3(tile_size, 1, 1);
threads = dim3(threads_per_tile, 1, 1);
//continuous variables
centralise = getMaximumBounds() + getMinimumBounds();
centralise /= 2;
hipLaunchKernelGGL(( output_Boid_agent_to_VBO), dim3(grid), dim3(threads), 0, 0, get_device_Boid_default_agents(), dptr, centralise);
CUT_CHECK_ERROR("Kernel execution failed");
// unmap buffer object
CUDA_SAFE_CALL(hipGLUnmapBufferObject(Boid_default_tbo));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize GL
////////////////////////////////////////////////////////////////////////////////
CUTBoolean initGL()
{
// initialize necessary OpenGL extensions
glewInit();
if (! glewIsSupported( "GL_VERSION_2_0 "
"GL_ARB_pixel_buffer_object"
)) {
fprintf( stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush( stderr);
return CUTFalse;
}
// default initialization
glClearColor( 1.0, 1.0, 1.0, 1.0);
glEnable( GL_DEPTH_TEST);
// viewport
glViewport( 0, 0, WINDOW_WIDTH, WINDOW_HEIGHT);
// projection
glMatrixMode( GL_PROJECTION);
glLoadIdentity();
gluPerspective(45.0, (GLfloat)WINDOW_WIDTH / (GLfloat) WINDOW_HEIGHT, NEAR_CLIP, FAR_CLIP);
checkGLError();
//lighting
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
return CUTTrue;
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize GLSL Vertex Shader
////////////////////////////////////////////////////////////////////////////////
void initShader()
{
const char* v = vertexShaderSource;
const char* f = fragmentShaderSource;
//vertex shader
vertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertexShader, 1, &v, 0);
glCompileShader(vertexShader);
//fragment shader
fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragmentShader, 1, &f, 0);
glCompileShader(fragmentShader);
//program
shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
// check for errors
GLint status;
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: Shader Compilation Error\n");
char data[262144];
int len;
glGetShaderInfoLog(vertexShader, 262144, &len, data);
printf("%s", data);
}
glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: Shader Compilation Error\n");
char data[262144];
int len;
glGetShaderInfoLog(fragmentShader, 262144, &len, data);
printf("%s", data);
}
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: Shader Program Link Error\n");
}
// get shader variables
vs_displacementMap = glGetUniformLocation(shaderProgram, "displacementMap");
vs_mapIndex = glGetAttribLocation(shaderProgram, "mapIndex");
}
////////////////////////////////////////////////////////////////////////////////
//! Create VBO
////////////////////////////////////////////////////////////////////////////////
void createVBO(GLuint* vbo, GLuint size)
{
// create buffer object
glGenBuffers( 1, vbo);
glBindBuffer( GL_ARRAY_BUFFER, *vbo);
// initialize buffer object
glBufferData( GL_ARRAY_BUFFER, size, 0, GL_STATIC_DRAW);
glBindBuffer( GL_ARRAY_BUFFER, 0);
checkGLError();
}
////////////////////////////////////////////////////////////////////////////////
//! Delete VBO
////////////////////////////////////////////////////////////////////////////////
void deleteVBO( GLuint* vbo)
{
glBindBuffer( 1, *vbo);
glDeleteBuffers( 1, vbo);
*vbo = 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Create TBO
////////////////////////////////////////////////////////////////////////////////
void createTBO(GLuint* tbo, GLuint* tex, GLuint size)
{
// create buffer object
glGenBuffers( 1, tbo);
glBindBuffer( GL_TEXTURE_BUFFER_EXT, *tbo);
// initialize buffer object
glBufferData( GL_TEXTURE_BUFFER_EXT, size, 0, GL_DYNAMIC_DRAW);
//tex
glGenTextures(1, tex);
glBindTexture(GL_TEXTURE_BUFFER_EXT, *tex);
glTexBufferEXT(GL_TEXTURE_BUFFER_EXT, GL_RGBA32F_ARB, *tbo);
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
// register buffer object with CUDA
CUDA_SAFE_CALL(hipGLRegisterBufferObject(*tbo));
checkGLError();
}
////////////////////////////////////////////////////////////////////////////////
//! Delete TBO
////////////////////////////////////////////////////////////////////////////////
void deleteTBO( GLuint* tbo)
{
glBindBuffer( 1, *tbo);
glDeleteBuffers( 1, tbo);
CUDA_SAFE_CALL(hipGLUnregisterBufferObject(*tbo));
*tbo = 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Set Sphere Vertex Data
////////////////////////////////////////////////////////////////////////////////
static void setSphereVertex(float3* data, int slice, int stack) {
float PI = 3.14159265358;
double sl = 2*PI*slice/SPHERE_SLICES;
double st = 2*PI*stack/SPHERE_STACKS;
data->x = cos(st)*sin(sl) * SPHERE_RADIUS;
data->y = sin(st)*sin(sl) * SPHERE_RADIUS;
data->z = cos(sl) * SPHERE_RADIUS;
}
////////////////////////////////////////////////////////////////////////////////
//! Set Sphere Normal Data
////////////////////////////////////////////////////////////////////////////////
static void setSphereNormal(float3* data, int slice, int stack) {
float PI = 3.14159265358;
double sl = 2*PI*slice/SPHERE_SLICES;
double st = 2*PI*stack/SPHERE_STACKS;
data->x = cos(st)*sin(sl);
data->y = sin(st)*sin(sl);
data->z = cos(sl);
}
////////////////////////////////////////////////////////////////////////////////
//! Set Vertex Buffer Data
////////////////////////////////////////////////////////////////////////////////
void setVertexBufferData()
{
int slice, stack;
int i;
// upload vertex points data
glBindBuffer(GL_ARRAY_BUFFER, sphereVerts);
float3* verts =( float3*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
i = 0;
for (slice=0; slice<SPHERE_SLICES/2; slice++) {
for (stack=0; stack<=SPHERE_STACKS; stack++) {
setSphereVertex(&verts[i++], slice, stack);
setSphereVertex(&verts[i++], slice+1, stack);
}
}
glUnmapBuffer(GL_ARRAY_BUFFER);
// upload vertex normal data
glBindBuffer(GL_ARRAY_BUFFER, sphereNormals);
float3* normals =( float3*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
i = 0;
for (slice=0; slice<SPHERE_SLICES/2; slice++) {
for (stack=0; stack<=SPHERE_STACKS; stack++) {
setSphereNormal(&normals[i++], slice, stack);
setSphereNormal(&normals[i++], slice+1, stack);
}
}
glUnmapBuffer(GL_ARRAY_BUFFER);
}
////////////////////////////////////////////////////////////////////////////////
//! Display callback
////////////////////////////////////////////////////////////////////////////////
void display()
{
//CUDA start Timing
CUT_SAFE_CALL( cutStartTimer( timer));
// run CUDA kernel to generate vertex positions
runCuda();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// set view matrix
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//zoom
glTranslatef(0.0, 0.0, translate_z);
//move
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_y, 0.0, 0.0, 1.0);
//Set light position
glLightfv(GL_LIGHT0, GL_POSITION, LIGHT_POSITION);
//Draw Boid Agents in default state
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER_EXT, Boid_default_displacementTex);
//loop
for (int i=0; i< get_agent_Boid_default_count(); i++){
glVertexAttrib1f(vs_mapIndex, (float)i);
//draw using vertex and attribute data on the gpu (fast)
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, sphereVerts);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, sphereNormals);
glNormalPointer(GL_FLOAT, 0, 0);
glDrawArrays(GL_TRIANGLE_STRIP, 0, SPHERE_SLICES * (SPHERE_STACKS+1));
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
}
//CUDA stop timing
hipDeviceSynchronize();
glFlush();
CUT_SAFE_CALL( cutStopTimer( timer));
if(frame_count == average){
char title [100];
sprintf(title, "Execution & Rendering Total: %f (FPS)", average/(cutGetTimerValue( timer)/1000.0f));
glutSetWindowTitle(title);
//reset
frame_count = 0;
CUT_SAFE_CALL( cutResetTimer( timer));
}else{
frame_count++;
}
glutSwapBuffers();
glutPostRedisplay();
}
////////////////////////////////////////////////////////////////////////////////
//! Keyboard events handler
////////////////////////////////////////////////////////////////////////////////
void keyboard( unsigned char key, int /*x*/, int /*y*/)
{
switch( key) {
case( 27) :
deleteVBO( &sphereVerts);
deleteVBO( &sphereNormals);
deleteTBO( &Boid_default_tbo);
exit( 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Mouse event handlers
////////////////////////////////////////////////////////////////////////////////
void mouse(int button, int state, int x, int y)
{
if (state == GLUT_DOWN) {
mouse_buttons |= 1<<button;
} else if (state == GLUT_UP) {
mouse_buttons = 0;
}
mouse_old_x = x;
mouse_old_y = y;
glutPostRedisplay();
}
void motion(int x, int y)
{
float dx, dy;
dx = x - mouse_old_x;
dy = y - mouse_old_y;
if (mouse_buttons & 1) {
rotate_x += dy * 0.2;
rotate_y += dx * 0.2;
} else if (mouse_buttons & 4) {
translate_z += dy * VIEW_DISTANCE* 0.001;
}
mouse_old_x = x;
mouse_old_y = y;
}
void checkGLError(){
int Error;
if((Error = glGetError()) != GL_NO_ERROR)
{
const char* Message = (const char*)gluErrorString(Error);
fprintf(stderr, "OpenGL Error : %s\n", Message);
}
}
|
1b1fbc33bc7000f5a89e547d5690ebe2fc8d9baa.cu
|
/*
* FLAME GPU v 1.2.0 for CUDA 6
* Copyright 2014 University of Sheffield.
* Author: Dr Paul Richmond
* Contact: [email protected] (http://www.paulrichmond.staff.shef.ac.uk)
*
* University of Sheffield retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* University of Sheffield is strictly prohibited.
*
* For terms of licence agreement please attached licence or view licence
* on www.flamegpu.com website.
*
*/
// includes, project
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
#include <cutil_math.h>
#include <GL/glew.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>
#include <header.h>
#include <visualisation.h>
// bo variables
GLuint sphereVerts;
GLuint sphereNormals;
//Simulation output buffers/textures
GLuint Boid_default_tbo;
GLuint Boid_default_displacementTex;
// mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_y = 0.0;
float translate_z = -VIEW_DISTANCE;
// vertex Shader
GLuint vertexShader;
GLuint fragmentShader;
GLuint shaderProgram;
GLuint vs_displacementMap;
GLuint vs_mapIndex;
//timer
GLuint timer;
const int average = 50;
int frame_count;
#ifdef SIMULATION_DELAY
//delay
int delay_count = 0;
#endif
// prototypes
CUTBoolean initGL();
void initShader();
void createVBO( GLuint* vbo, GLuint size);
void deleteVBO( GLuint* vbo);
void createTBO( GLuint* tbo, GLuint* tex, GLuint size);
void deleteTBO( GLuint* tbo);
void setVertexBufferData();
void display();
void keyboard( unsigned char key, int x, int y);
void mouse(int button, int state, int x, int y);
void motion(int x, int y);
void runCuda();
void checkGLError();
const char vertexShaderSource[] =
{
"#extension GL_EXT_gpu_shader4 : enable \n"
"uniform samplerBuffer displacementMap; \n"
"attribute in float mapIndex; \n"
"varying vec3 normal, lightDir; \n"
"varying vec4 colour; \n"
"void main() \n"
"{ \n"
" vec4 position = gl_Vertex; \n"
" vec4 lookup = texelFetchBuffer(displacementMap, (int)mapIndex); \n"
" if (lookup.w > 6.5) \n"
" colour = vec4(1.0, 1.0, 1.0, 0.0); \n"
" else if (lookup.w > 5.5) \n"
" colour = vec4(1.0, 0.0, 1.0, 0.0); \n"
" else if (lookup.w > 4.5) \n"
" colour = vec4(0.0, 1.0, 1.0, 0.0); \n"
" else if (lookup.w > 3.5) \n"
" colour = vec4(1.0, 1.0, 0.0, 0.0); \n"
" else if (lookup.w > 2.5) \n"
" colour = vec4(0.0, 0.0, 1.0, 0.0); \n"
" else if (lookup.w > 1.5) \n"
" colour = vec4(0.0, 1.0, 0.0, 0.0); \n"
" else if (lookup.w > 0.5) \n"
" colour = vec4(1.0, 0.0, 0.0, 0.0); \n"
" else \n"
" colour = vec4(0.0, 0.0, 0.0, 0.0); \n"
" \n"
" lookup.w = 1.0; \n"
" position += lookup; \n"
" gl_Position = gl_ModelViewProjectionMatrix * position; \n"
" \n"
" vec3 mvVertex = vec3(gl_ModelViewMatrix * position); \n"
" lightDir = vec3(gl_LightSource[0].position.xyz - mvVertex); \n"
" normal = gl_NormalMatrix * gl_Normal; \n"
"} \n"
};
const char fragmentShaderSource[] =
{
"varying vec3 normal, lightDir; \n"
"varying vec4 colour; \n"
"void main (void) \n"
"{ \n"
" // Defining The Material Colors \n"
" vec4 AmbientColor = vec4(0.25, 0.0, 0.0, 1.0); \n"
" vec4 DiffuseColor = colour; \n"
" \n"
" // Scaling The Input Vector To Length 1 \n"
" vec3 n_normal = normalize(normal); \n"
" vec3 n_lightDir = normalize(lightDir); \n"
" \n"
" // Calculating The Diffuse Term And Clamping It To [0;1] \n"
" float DiffuseTerm = clamp(dot(n_normal, n_lightDir), 0.0, 1.0);\n"
" \n"
" // Calculating The Final Color \n"
" gl_FragColor = AmbientColor + DiffuseColor * DiffuseTerm; \n"
" \n"
"} \n"
};
//GPU Kernels
__global__ void output_Boid_agent_to_VBO(xmachine_memory_Boid_list* agents, float4* vbo, float3 centralise){
//global thread index
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
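// Re-centre the agent position about the midpoint of the simulation bounds
// (passed in as 'centralise') and scale it to roughly normalised device
// coordinates for rendering.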
vbo[index].x = 0.0;
vbo[index].y = 0.0;
vbo[index].z = 0.0;
vbo[index].x = (agents->x[index] - centralise.x) / centralise.x;
vbo[index].y = (agents->y[index] - centralise.y) / centralise.y;
vbo[index].z = 0.0;
vbo[index].w = 1.0;
if (index == 0)
printf("%f ", agents->x[index]);
}
void initVisualisation()
{
//set the CUDA GL device: Will cause an error without this since CUDA 3.0
cudaGLSetGLDevice(0);
// Create GL context
int argc = 1;
char *argv[] = {"GLUT application", NULL};
glutInit( &argc, argv);
glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize( WINDOW_WIDTH, WINDOW_HEIGHT);
glutCreateWindow( "FLAME GPU Visualiser");
// initialize GL
if( CUTFalse == initGL()) {
return;
}
initShader();
// register callbacks
glutDisplayFunc( display);
glutKeyboardFunc( keyboard);
glutMouseFunc( mouse);
glutMotionFunc( motion);
// create VBO's
createVBO( &sphereVerts, SPHERE_SLICES* (SPHERE_STACKS+1) * sizeof(float3));
createVBO( &sphereNormals, SPHERE_SLICES* (SPHERE_STACKS+1) * sizeof (float3));
setVertexBufferData();
// create TBO
createTBO( &Boid_default_tbo, &Boid_default_displacementTex, xmachine_memory_Boid_MAX * sizeof( float4));
//set shader uniforms
glUseProgram(shaderProgram);
CUT_SAFE_CALL( cutCreateTimer( &timer));
}
void runVisualisation(){
// start rendering mainloop
glutMainLoop();
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
void runCuda()
{
#ifdef SIMULATION_DELAY
delay_count++;
if (delay_count == SIMULATION_DELAY){
delay_count = 0;
singleIteration();
}
#else
singleIteration();
#endif
//kernals sizes
int threads_per_tile = 256;
int tile_size;
dim3 grid;
dim3 threads;
float3 centralise;
//pointer
float4 *dptr;
if (get_agent_Boid_default_count() > 0)
{
// map OpenGL buffer object for writing from CUDA
CUDA_SAFE_CALL(cudaGLMapBufferObject( (void**)&dptr, Boid_default_tbo));
//cuda block size
tile_size = (int) ceil((float)get_agent_Boid_default_count()/threads_per_tile);
grid = dim3(tile_size, 1, 1);
threads = dim3(threads_per_tile, 1, 1);
//continuous variables
centralise = getMaximumBounds() + getMinimumBounds();
centralise /= 2;
output_Boid_agent_to_VBO<<< grid, threads>>>(get_device_Boid_default_agents(), dptr, centralise);
CUT_CHECK_ERROR("Kernel execution failed");
// unmap buffer object
CUDA_SAFE_CALL(cudaGLUnmapBufferObject(Boid_default_tbo));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize GL
////////////////////////////////////////////////////////////////////////////////
CUTBoolean initGL()
{
// initialize necessary OpenGL extensions
glewInit();
if (! glewIsSupported( "GL_VERSION_2_0 "
"GL_ARB_pixel_buffer_object"
)) {
fprintf( stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush( stderr);
return CUTFalse;
}
// default initialization
glClearColor( 1.0, 1.0, 1.0, 1.0);
glEnable( GL_DEPTH_TEST);
// viewport
glViewport( 0, 0, WINDOW_WIDTH, WINDOW_HEIGHT);
// projection
glMatrixMode( GL_PROJECTION);
glLoadIdentity();
gluPerspective(45.0, (GLfloat)WINDOW_WIDTH / (GLfloat) WINDOW_HEIGHT, NEAR_CLIP, FAR_CLIP);
checkGLError();
//lighting
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
return CUTTrue;
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize GLSL Vertex Shader
////////////////////////////////////////////////////////////////////////////////
void initShader()
{
const char* v = vertexShaderSource;
const char* f = fragmentShaderSource;
//vertex shader
vertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertexShader, 1, &v, 0);
glCompileShader(vertexShader);
//fragment shader
fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragmentShader, 1, &f, 0);
glCompileShader(fragmentShader);
//program
shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
// check for errors
GLint status;
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: Shader Compilation Error\n");
char data[262144];
int len;
glGetShaderInfoLog(vertexShader, 262144, &len, data);
printf("%s", data);
}
glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: Shader Compilation Error\n");
char data[262144];
int len;
glGetShaderInfoLog(fragmentShader, 262144, &len, data);
printf("%s", data);
}
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: Shader Program Link Error\n");
}
// get shader variables
vs_displacementMap = glGetUniformLocation(shaderProgram, "displacementMap");
vs_mapIndex = glGetAttribLocation(shaderProgram, "mapIndex");
}
////////////////////////////////////////////////////////////////////////////////
//! Create VBO
////////////////////////////////////////////////////////////////////////////////
void createVBO(GLuint* vbo, GLuint size)
{
// create buffer object
glGenBuffers( 1, vbo);
glBindBuffer( GL_ARRAY_BUFFER, *vbo);
// initialize buffer object
glBufferData( GL_ARRAY_BUFFER, size, 0, GL_STATIC_DRAW);
glBindBuffer( GL_ARRAY_BUFFER, 0);
checkGLError();
}
////////////////////////////////////////////////////////////////////////////////
//! Delete VBO
////////////////////////////////////////////////////////////////////////////////
void deleteVBO( GLuint* vbo)
{
glBindBuffer( 1, *vbo);
glDeleteBuffers( 1, vbo);
*vbo = 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Create TBO
////////////////////////////////////////////////////////////////////////////////
void createTBO(GLuint* tbo, GLuint* tex, GLuint size)
{
// create buffer object
glGenBuffers( 1, tbo);
glBindBuffer( GL_TEXTURE_BUFFER_EXT, *tbo);
// initialize buffer object
glBufferData( GL_TEXTURE_BUFFER_EXT, size, 0, GL_DYNAMIC_DRAW);
//tex
glGenTextures(1, tex);
glBindTexture(GL_TEXTURE_BUFFER_EXT, *tex);
glTexBufferEXT(GL_TEXTURE_BUFFER_EXT, GL_RGBA32F_ARB, *tbo);
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
// register buffer object with CUDA
CUDA_SAFE_CALL(cudaGLRegisterBufferObject(*tbo));
checkGLError();
}
////////////////////////////////////////////////////////////////////////////////
//! Delete TBO
////////////////////////////////////////////////////////////////////////////////
void deleteTBO( GLuint* tbo)
{
glBindBuffer( 1, *tbo);
glDeleteBuffers( 1, tbo);
CUDA_SAFE_CALL(cudaGLUnregisterBufferObject(*tbo));
*tbo = 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Set Sphere Vertex Data
////////////////////////////////////////////////////////////////////////////////
static void setSphereVertex(float3* data, int slice, int stack) {
float PI = 3.14159265358;
double sl = 2*PI*slice/SPHERE_SLICES;
double st = 2*PI*stack/SPHERE_STACKS;
data->x = cos(st)*sin(sl) * SPHERE_RADIUS;
data->y = sin(st)*sin(sl) * SPHERE_RADIUS;
data->z = cos(sl) * SPHERE_RADIUS;
}
////////////////////////////////////////////////////////////////////////////////
//! Set Sphere Normal Data
////////////////////////////////////////////////////////////////////////////////
static void setSphereNormal(float3* data, int slice, int stack) {
float PI = 3.14159265358;
double sl = 2*PI*slice/SPHERE_SLICES;
double st = 2*PI*stack/SPHERE_STACKS;
data->x = cos(st)*sin(sl);
data->y = sin(st)*sin(sl);
data->z = cos(sl);
}
////////////////////////////////////////////////////////////////////////////////
//! Set Vertex Buffer Data
////////////////////////////////////////////////////////////////////////////////
void setVertexBufferData()
{
int slice, stack;
int i;
// upload vertex points data
glBindBuffer(GL_ARRAY_BUFFER, sphereVerts);
float3* verts =( float3*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
i = 0;
for (slice=0; slice<SPHERE_SLICES/2; slice++) {
for (stack=0; stack<=SPHERE_STACKS; stack++) {
setSphereVertex(&verts[i++], slice, stack);
setSphereVertex(&verts[i++], slice+1, stack);
}
}
glUnmapBuffer(GL_ARRAY_BUFFER);
// upload vertex normal data
glBindBuffer(GL_ARRAY_BUFFER, sphereNormals);
float3* normals =( float3*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
i = 0;
for (slice=0; slice<SPHERE_SLICES/2; slice++) {
for (stack=0; stack<=SPHERE_STACKS; stack++) {
setSphereNormal(&normals[i++], slice, stack);
setSphereNormal(&normals[i++], slice+1, stack);
}
}
glUnmapBuffer(GL_ARRAY_BUFFER);
}
////////////////////////////////////////////////////////////////////////////////
//! Display callback
////////////////////////////////////////////////////////////////////////////////
void display()
{
//CUDA start Timing
CUT_SAFE_CALL( cutStartTimer( timer));
// run CUDA kernel to generate vertex positions
runCuda();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// set view matrix
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//zoom
glTranslatef(0.0, 0.0, translate_z);
//move
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_y, 0.0, 0.0, 1.0);
//Set light position
glLightfv(GL_LIGHT0, GL_POSITION, LIGHT_POSITION);
//Draw Boid Agents in default state
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER_EXT, Boid_default_displacementTex);
//loop
for (int i=0; i< get_agent_Boid_default_count(); i++){
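// pass the agent index to the vertex shader via vs_mapIndex so it can fetch this agent's displacement from the bound texture buffer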
glVertexAttrib1f(vs_mapIndex, (float)i);
//draw using vertex and attribute data on the gpu (fast)
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, sphereVerts);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, sphereNormals);
glNormalPointer(GL_FLOAT, 0, 0);
glDrawArrays(GL_TRIANGLE_STRIP, 0, SPHERE_SLICES * (SPHERE_STACKS+1));
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
}
//CUDA stop timing
cudaThreadSynchronize();
glFlush();
CUT_SAFE_CALL( cutStopTimer( timer));
if(frame_count == average){
char title [100];
sprintf(title, "Execution & Rendering Total: %f (FPS)", average/(cutGetTimerValue( timer)/1000.0f));
glutSetWindowTitle(title);
//reset
frame_count = 0;
CUT_SAFE_CALL( cutResetTimer( timer));
}else{
frame_count++;
}
glutSwapBuffers();
glutPostRedisplay();
}
////////////////////////////////////////////////////////////////////////////////
//! Keyboard events handler
////////////////////////////////////////////////////////////////////////////////
void keyboard( unsigned char key, int /*x*/, int /*y*/)
{
switch( key) {
case( 27) :
deleteVBO( &sphereVerts);
deleteVBO( &sphereNormals);
deleteTBO( &Boid_default_tbo);
exit( 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Mouse event handlers
////////////////////////////////////////////////////////////////////////////////
void mouse(int button, int state, int x, int y)
{
if (state == GLUT_DOWN) {
mouse_buttons |= 1<<button;
} else if (state == GLUT_UP) {
mouse_buttons = 0;
}
mouse_old_x = x;
mouse_old_y = y;
glutPostRedisplay();
}
void motion(int x, int y)
{
float dx, dy;
dx = x - mouse_old_x;
dy = y - mouse_old_y;
if (mouse_buttons & 1) {
rotate_x += dy * 0.2;
rotate_y += dx * 0.2;
} else if (mouse_buttons & 4) {
translate_z += dy * VIEW_DISTANCE* 0.001;
}
mouse_old_x = x;
mouse_old_y = y;
}
void checkGLError(){
int Error;
if((Error = glGetError()) != GL_NO_ERROR)
{
const char* Message = (const char*)gluErrorString(Error);
fprintf(stderr, "OpenGL Error : %s\n", Message);
}
}
|
c9764104c64b632d50f010134aed6e46b8a2ea73.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cuNSearch.h"
#include <algorithm>
#include <iostream>
#include <limits>
#include <stdio.h>
#include <inttypes.h>
#include <stdint.h>
#include <chrono>
#include <thread>
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/gather.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/fill.h>
#ifdef DEBUG
#define PRINT_STATS true
#define USE_TIMING(x) x;
#else
#define PRINT_STATS false
#define USE_TIMING(x)
#endif
#include "Timing.h"
#include "NotImplementedException.h"
#include "cuNSearchDeviceData.h"
#include "PointSetImplementation.h"
#include "cuNSearchKernels.cuh"
#include "cuda_helper.h"
namespace cuNSearch
{
NeighborhoodSearch::NeighborhoodSearch(Real searchRadius)
{
deviceData = std::make_unique<cuNSearchDeviceData>(searchRadius);
set_radius(searchRadius);
}
NeighborhoodSearch::~NeighborhoodSearch()
{
}
unsigned int NeighborhoodSearch::add_point_set(Real const* x, std::size_t n, bool is_dynamic,
bool search_neighbors, bool find_neighbors, void *user_data)
{
auto index = pointSets.size();
pointSets.push_back(PointSet(x, n, is_dynamic, user_data));
m_activation_table.add_point_set(search_neighbors, find_neighbors);
for (auto &pointSet : pointSets)
{
pointSet.neighbors.resize(pointSets.size());
}
return static_cast<unsigned int>(index);
}
void NeighborhoodSearch::set_radius(Real r)
{
this->searchRadius = r;
deviceData->setSearchRadius(r);
isInitialized = false;
}
void NeighborhoodSearch::z_sort()
{
//Do nothing as the sort step is part of the main implementation
}
void
NeighborhoodSearch::resize_point_set(unsigned int index, Real const* x, std::size_t size)
{
pointSets[index].resize(x, size);
}
void
NeighborhoodSearch::update_activation_table()
{
//Update neighborhood search data structures after changing the activation table.
//If the general find_neighbors() function is called, there is no need to manually update the point sets.
}
void
NeighborhoodSearch::updatePointSet(PointSet &pointSet)
{
USE_TIMING(Timing::startTiming("Update point sets - copyParticleData"));
pointSet.impl->copyToDevice();
USE_TIMING(Timing::stopTiming(PRINT_STATS));
USE_TIMING(Timing::startTiming("Update point sets - computeMinMax"));
deviceData->computeMinMax(pointSet);
USE_TIMING(Timing::stopTiming(PRINT_STATS));
USE_TIMING(Timing::startTiming("Update point sets - computeCellInformation"));
deviceData->computeCellInformation(pointSet);
USE_TIMING(Timing::stopTiming(PRINT_STATS));
}
void
NeighborhoodSearch::find_neighbors(bool points_changed_)
{
if (points_changed_ || !isInitialized)
{
for (auto &pointSet : pointSets)
{
if (!isInitialized || pointSet.is_dynamic())
{
updatePointSet(pointSet);
}
}
}
isInitialized = true;
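//Launch a device-side neighborhood query for every active (query point set, neighbor point set) pair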
for (unsigned int i = 0; i < pointSets.size(); i++)
{
for (unsigned int j = 0; j < pointSets.size(); j++)
{
if (m_activation_table.is_active(i, j))
{
auto &queryPointSet = pointSets[i];
auto &pointSet = pointSets[j];
deviceData->computeNeighborhood(queryPointSet, pointSet, j);
}
}
}
}
void
NeighborhoodSearch::find_neighbors(unsigned int point_set_id, unsigned int point_index, std::vector<std::vector<unsigned int>> &neighbors)
{
throw new NotImplementedException("NeighborhoodSearch::find_neighbors()");
}
void
NeighborhoodSearch::update_point_sets()
{
for (unsigned int i = 0; i < pointSets.size(); i++)
{
update_point_set(i);
}
}
void
NeighborhoodSearch::update_point_set(int i)
{
updatePointSet(pointSets[i]);
}
}
|
c9764104c64b632d50f010134aed6e46b8a2ea73.cu
|
#include "cuNSearch.h"
#include <algorithm>
#include <iostream>
#include <limits>
#include <stdio.h>
#include <inttypes.h>
#include <stdint.h>
#include <chrono>
#include <thread>
#include <cuda_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/gather.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/fill.h>
#ifdef DEBUG
#define PRINT_STATS true
#define USE_TIMING(x) x;
#else
#define PRINT_STATS false
#define USE_TIMING(x)
#endif
#include "Timing.h"
#include "NotImplementedException.h"
#include "cuNSearchDeviceData.h"
#include "PointSetImplementation.h"
#include "cuNSearchKernels.cuh"
#include "cuda_helper.h"
namespace cuNSearch
{
NeighborhoodSearch::NeighborhoodSearch(Real searchRadius)
{
deviceData = std::make_unique<cuNSearchDeviceData>(searchRadius);
set_radius(searchRadius);
}
NeighborhoodSearch::~NeighborhoodSearch()
{
}
unsigned int NeighborhoodSearch::add_point_set(Real const* x, std::size_t n, bool is_dynamic,
bool search_neighbors, bool find_neighbors, void *user_data)
{
auto index = pointSets.size();
pointSets.push_back(PointSet(x, n, is_dynamic, user_data));
m_activation_table.add_point_set(search_neighbors, find_neighbors);
for (auto &pointSet : pointSets)
{
pointSet.neighbors.resize(pointSets.size());
}
return static_cast<unsigned int>(index);
}
void NeighborhoodSearch::set_radius(Real r)
{
this->searchRadius = r;
deviceData->setSearchRadius(r);
isInitialized = false;
}
void NeighborhoodSearch::z_sort()
{
//Do nothing as the sort step is part of the main implementation
}
void
NeighborhoodSearch::resize_point_set(unsigned int index, Real const* x, std::size_t size)
{
pointSets[index].resize(x, size);
}
void
NeighborhoodSearch::update_activation_table()
{
//Update neighborhood search data structures after changing the activation table.
//If the general find_neighbors() function is called, there is no need to manually update the point sets.
}
void
NeighborhoodSearch::updatePointSet(PointSet &pointSet)
{
USE_TIMING(Timing::startTiming("Update point sets - copyParticleData"));
pointSet.impl->copyToDevice();
USE_TIMING(Timing::stopTiming(PRINT_STATS));
USE_TIMING(Timing::startTiming("Update point sets - computeMinMax"));
deviceData->computeMinMax(pointSet);
USE_TIMING(Timing::stopTiming(PRINT_STATS));
USE_TIMING(Timing::startTiming("Update point sets - computeCellInformation"));
deviceData->computeCellInformation(pointSet);
USE_TIMING(Timing::stopTiming(PRINT_STATS));
}
void
NeighborhoodSearch::find_neighbors(bool points_changed_)
{
if (points_changed_ || !isInitialized)
{
for (auto &pointSet : pointSets)
{
if (!isInitialized || pointSet.is_dynamic())
{
updatePointSet(pointSet);
}
}
}
isInitialized = true;
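//Launch a device-side neighborhood query for every active (query point set, neighbor point set) pair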
for (unsigned int i = 0; i < pointSets.size(); i++)
{
for (unsigned int j = 0; j < pointSets.size(); j++)
{
if (m_activation_table.is_active(i, j))
{
auto &queryPointSet = pointSets[i];
auto &pointSet = pointSets[j];
deviceData->computeNeighborhood(queryPointSet, pointSet, j);
}
}
}
}
void
NeighborhoodSearch::find_neighbors(unsigned int point_set_id, unsigned int point_index, std::vector<std::vector<unsigned int>> &neighbors)
{
throw new NotImplementedException("NeighborhoodSearch::find_neighbors()");
}
void
NeighborhoodSearch::update_point_sets()
{
for (unsigned int i = 0; i < pointSets.size(); i++)
{
update_point_set(i);
}
}
void
NeighborhoodSearch::update_point_set(int i)
{
updatePointSet(pointSets[i]);
}
}
|
75a3e683d8b2f05b5402e309dadf49cf4755a04c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cstdio>
#include <vector>
#include <helper_cuda.h>
#include <helper_timer.h>
using namespace std;
const char *sSampleName = "P2P (Peer-to-Peer) GPU Bandwidth Latency Test";
typedef enum
{
P2P_WRITE = 0,
P2P_READ = 1,
}P2PDataTransfer;
//Macro for checking cuda errors following a cuda launch or api call
#define cudaCheckError() { \
hipError_t e=hipGetLastError(); \
if(e!=hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
__global__ void delay(volatile int *flag, unsigned long long timeout_ns = 10000000000)
{
// Wait until the application notifies us that it has completed queuing up the
// experiment, or timeout and exit, allowing the application to make progress
register unsigned long long start_time, sample_time;
asm("mov.u64 %0, %%globaltimer;" : "=l"(start_time));
while (!*flag) {
asm("mov.u64 %0, %%globaltimer;" : "=l"(sample_time));
if (sample_time - start_time > timeout_ns) {
break;
}
}
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void)
{
printf("Usage: p2pBandwidthLatencyTest [OPTION]...\n");
printf("Tests bandwidth/latency of GPU pairs using P2P and without P2P\n");
printf("\n");
printf("Options:\n");
printf("--help\t\tDisplay this help menu\n");
printf("--p2p_read\tUse P2P reads for data transfers between GPU pairs and show corresponding results.\n \t\tDefault used is P2P write operation.\n");
}
void checkP2Paccess(int numGPUs)
{
for (int i = 0; i < numGPUs; i++) {
hipSetDevice(i);
cudaCheckError();
for (int j = 0; j < numGPUs; j++) {
int access;
if (i != j) {
hipDeviceCanAccessPeer(&access, i, j);
cudaCheckError();
printf("Device=%d %s Access Peer Device=%d\n", i, access ? "CAN" : "CANNOT", j);
}
}
}
printf("\n***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.\nSo you can see lesser Bandwidth (GB/s) and unstable Latency (us) in those cases.\n\n");
}
void outputBandwidthMatrix(int numGPUs, bool p2p, P2PDataTransfer p2p_method)
{
int numElems = 10000000;
int repeat = 5;
volatile int *flag = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs); // buffer for D2D, that is, intra-GPU copy
vector<hipEvent_t> start(numGPUs);
vector<hipEvent_t> stop(numGPUs);
vector<hipStream_t> stream(numGPUs);
hipHostMalloc((void **)&flag, sizeof(*flag), hipHostMallocPortable);
cudaCheckError();
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipStreamCreateWithFlags(&stream[d], hipStreamNonBlocking);
hipMalloc(&buffers[d], numElems * sizeof(int));
cudaCheckError();
hipMalloc(&buffersD2D[d], numElems * sizeof(int));
cudaCheckError();
hipEventCreate(&start[d]);
cudaCheckError();
hipEventCreate(&stop[d]);
cudaCheckError();
}
vector<double> bandwidthMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
hipSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access;
if (p2p) {
hipDeviceCanAccessPeer(&access, i, j);
if (access) {
hipDeviceEnablePeerAccess(j, 0 );
cudaCheckError();
hipSetDevice(j);
cudaCheckError();
hipDeviceEnablePeerAccess(i, 0 );
cudaCheckError();
hipSetDevice(i);
cudaCheckError();
}
}
hipStreamSynchronize(stream[i]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - hipMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repetitions
// relatively low. Higher repetition counts will cause the delay kernel
// to time out and lead to unstable results.
*flag = 0;
hipLaunchKernelGGL(( delay), dim3(1), dim3(1), 0, stream[i], flag);
cudaCheckError();
hipEventRecord(start[i], stream[i]);
cudaCheckError();
if (i == j) {
// Perform intra-GPU, D2D copies
for (int r = 0; r < repeat; r++) {
hipMemcpyPeerAsync(buffers[i], i, buffersD2D[i], i, sizeof(int)*numElems, stream[i]);
}
}
else {
if (p2p_method == P2P_WRITE)
{
for (int r = 0; r < repeat; r++) {
// Perform P2P writes
hipMemcpyPeerAsync(buffers[j], j, buffers[i], i, sizeof(int)*numElems, stream[i]);
}
}
else
{
for (int r = 0; r < repeat; r++) {
// Perform P2P reads
hipMemcpyPeerAsync(buffers[i], i, buffers[j], j, sizeof(int)*numElems, stream[i]);
}
}
}
hipEventRecord(stop[i], stream[i]);
cudaCheckError();
// Release the queued events
*flag = 1;
hipStreamSynchronize(stream[i]);
cudaCheckError();
float time_ms;
hipEventElapsedTime(&time_ms, start[i], stop[i]);
double time_s = time_ms / 1e3;
double gb = numElems * sizeof(int) * repeat / (double)1e9;
if (i == j) {
gb *= 2; //must count both the read and the write here
}
bandwidthMatrix[i * numGPUs + j] = gb / time_s;
if (p2p && access) {
hipDeviceDisablePeerAccess(j);
hipSetDevice(j);
hipDeviceDisablePeerAccess(i);
hipSetDevice(i);
cudaCheckError();
}
}
}
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", bandwidthMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipFree(buffers[d]);
hipFree(buffersD2D[d]);
cudaCheckError();
hipEventDestroy(start[d]);
cudaCheckError();
hipEventDestroy(stop[d]);
cudaCheckError();
hipStreamDestroy(stream[d]);
cudaCheckError();
}
hipHostFree((void *)flag);
cudaCheckError();
}
void outputBidirectionalBandwidthMatrix(int numGPUs, bool p2p)
{
int numElems = 10000000;
int repeat = 5;
volatile int *flag = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs);
vector<hipEvent_t> start(numGPUs);
vector<hipEvent_t> stop(numGPUs);
vector<hipStream_t> stream0(numGPUs);
vector<hipStream_t> stream1(numGPUs);
hipHostMalloc((void **)&flag, sizeof(*flag), hipHostMallocPortable);
cudaCheckError();
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipMalloc(&buffers[d], numElems * sizeof(int));
hipMalloc(&buffersD2D[d], numElems * sizeof(int));
cudaCheckError();
hipEventCreate(&start[d]);
cudaCheckError();
hipEventCreate(&stop[d]);
cudaCheckError();
hipStreamCreateWithFlags(&stream0[d], hipStreamNonBlocking);
cudaCheckError();
hipStreamCreateWithFlags(&stream1[d], hipStreamNonBlocking);
cudaCheckError();
}
vector<double> bandwidthMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
hipSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access;
if (p2p) {
hipDeviceCanAccessPeer(&access, i, j);
if (access) {
hipSetDevice(i);
hipDeviceEnablePeerAccess(j, 0);
cudaCheckError();
hipSetDevice(j);
hipDeviceEnablePeerAccess(i, 0);
cudaCheckError();
}
}
hipSetDevice(i);
hipStreamSynchronize(stream0[i]);
hipStreamSynchronize(stream1[j]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - hipMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repetitions
// relatively low. Higher repetition counts will cause the delay kernel
// to time out and lead to unstable results.
*flag = 0;
hipSetDevice(i);
// No need to block stream1 since it'll be blocked on stream0's event
hipLaunchKernelGGL(( delay), dim3(1), dim3(1), 0, stream0[i], flag);
cudaCheckError();
// Force stream1 not to start until stream0 does, in order to ensure
// the events on stream0 fully encompass the time needed for all operations
hipEventRecord(start[i], stream0[i]);
hipStreamWaitEvent(stream1[j], start[i], 0);
if (i == j) {
// For intra-GPU perform 2 memcopies buffersD2D <-> buffers
for (int r = 0; r < repeat; r++) {
hipMemcpyPeerAsync(buffers[i], i, buffersD2D[i], i, sizeof(int)*numElems, stream0[i]);
hipMemcpyPeerAsync(buffersD2D[i], i, buffers[i], i, sizeof(int)*numElems, stream1[i]);
}
}
else {
for (int r = 0; r < repeat; r++) {
hipMemcpyPeerAsync(buffers[i], i, buffers[j], j, sizeof(int)*numElems, stream1[j]);
hipMemcpyPeerAsync(buffers[j], j, buffers[i], i, sizeof(int)*numElems, stream0[i]);
}
}
// Notify stream0 that stream1 is complete and record the time of
// the total transaction
hipEventRecord(stop[j], stream1[j]);
hipStreamWaitEvent(stream0[i], stop[j], 0);
hipEventRecord(stop[i], stream0[i]);
// Release the queued operations
*flag = 1;
hipStreamSynchronize(stream0[i]);
hipStreamSynchronize(stream1[j]);
cudaCheckError();
float time_ms;
hipEventElapsedTime(&time_ms, start[i], stop[i]);
double time_s = time_ms / 1e3;
double gb = 2.0 * numElems * sizeof(int) * repeat / (double)1e9;
if (i == j) {
gb *= 2; //must count both the read and the write here
}
bandwidthMatrix[i * numGPUs + j] = gb / time_s;
if (p2p && access) {
hipSetDevice(i);
hipDeviceDisablePeerAccess(j);
hipSetDevice(j);
hipDeviceDisablePeerAccess(i);
}
}
}
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", bandwidthMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipFree(buffers[d]);
hipFree(buffersD2D[d]);
cudaCheckError();
hipEventDestroy(start[d]);
cudaCheckError();
hipEventDestroy(stop[d]);
cudaCheckError();
hipStreamDestroy(stream0[d]);
cudaCheckError();
hipStreamDestroy(stream1[d]);
cudaCheckError();
}
hipHostFree((void *)flag);
cudaCheckError();
}
void outputLatencyMatrix(int numGPUs, bool p2p, P2PDataTransfer p2p_method)
{
int repeat = 100;
volatile int *flag = NULL;
StopWatchInterface *stopWatch = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs); // buffer for D2D, that is, intra-GPU copy
vector<hipStream_t> stream(numGPUs);
vector<hipEvent_t> start(numGPUs);
vector<hipEvent_t> stop(numGPUs);
hipHostMalloc((void **)&flag, sizeof(*flag), hipHostMallocPortable);
cudaCheckError();
if (!sdkCreateTimer(&stopWatch)) {
printf("Failed to create stop watch\n");
exit(EXIT_FAILURE);
}
sdkStartTimer(&stopWatch);
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipStreamCreateWithFlags(&stream[d], hipStreamNonBlocking);
hipMalloc(&buffers[d], 1);
hipMalloc(&buffersD2D[d], 1);
cudaCheckError();
hipEventCreate(&start[d]);
cudaCheckError();
hipEventCreate(&stop[d]);
cudaCheckError();
}
vector<double> gpuLatencyMatrix(numGPUs * numGPUs);
vector<double> cpuLatencyMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
hipSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access;
if (p2p) {
hipDeviceCanAccessPeer(&access, i, j);
if (access) {
hipDeviceEnablePeerAccess(j, 0);
cudaCheckError();
hipSetDevice(j);
hipDeviceEnablePeerAccess(i, 0);
hipSetDevice(i);
cudaCheckError();
}
}
hipStreamSynchronize(stream[i]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - hipMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repetitions
// relatively low. Higher repetition counts will cause the delay kernel
// to time out and lead to unstable results.
*flag = 0;
hipLaunchKernelGGL(( delay), dim3(1), dim3(1), 0, stream[i], flag);
cudaCheckError();
hipEventRecord(start[i], stream[i]);
sdkResetTimer(&stopWatch);
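// The CPU timer below only measures how long the host takes to enqueue the async copies; the GPU events measure the actual transfer latency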
if (i == j) {
// Perform intra-GPU, D2D copies
for (int r = 0; r < repeat; r++) {
hipMemcpyPeerAsync(buffers[i], i, buffersD2D[i], i, 1, stream[i]);
}
}
else {
if (p2p_method == P2P_WRITE)
{
for (int r = 0; r < repeat; r++) {
// Perform P2P writes
hipMemcpyPeerAsync(buffers[j], j, buffers[i], i, 1, stream[i]);
}
}
else
{
for (int r = 0; r < repeat; r++) {
// Perform P2P reads
hipMemcpyPeerAsync(buffers[i], i, buffers[j], j, 1, stream[i]);
}
}
}
float cpu_time_ms = sdkGetTimerValue(&stopWatch);
hipEventRecord(stop[i], stream[i]);
// Now that the work has been queued up, release the stream
*flag = 1;
hipStreamSynchronize(stream[i]);
cudaCheckError();
float gpu_time_ms;
hipEventElapsedTime(&gpu_time_ms, start[i], stop[i]);
gpuLatencyMatrix[i * numGPUs + j] = gpu_time_ms * 1e3 / repeat;
cpuLatencyMatrix[i * numGPUs + j] = cpu_time_ms * 1e3 / repeat;
if (p2p && access) {
hipDeviceDisablePeerAccess(j);
hipSetDevice(j);
hipDeviceDisablePeerAccess(i);
hipSetDevice(i);
cudaCheckError();
}
}
}
printf(" GPU");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", gpuLatencyMatrix[i * numGPUs + j]);
}
printf("\n");
}
printf("\n CPU");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", cpuLatencyMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipFree(buffers[d]);
hipFree(buffersD2D[d]);
cudaCheckError();
hipEventDestroy(start[d]);
cudaCheckError();
hipEventDestroy(stop[d]);
cudaCheckError();
hipStreamDestroy(stream[d]);
cudaCheckError();
}
sdkDeleteTimer(&stopWatch);
hipHostFree((void *)flag);
cudaCheckError();
}
int main(int argc, char **argv)
{
int numGPUs;
P2PDataTransfer p2p_method = P2P_WRITE;
hipGetDeviceCount(&numGPUs);
cudaCheckError();
//process command line args
if (checkCmdLineFlag(argc, (const char**)argv, "help"))
{
printHelp();
return 0;
}
if (checkCmdLineFlag(argc, (const char**)argv, "p2p_read"))
{
p2p_method = P2P_READ;
}
printf("[%s]\n", sSampleName);
//output devices
for (int i = 0; i < numGPUs; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
cudaCheckError();
printf("Device: %d, %s, pciBusID: %x, pciDeviceID: %x, pciDomainID:%x\n", i, prop.name, prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
}
checkP2Paccess(numGPUs);
//Check peer-to-peer connectivity
printf("P2P Connectivity Matrix\n");
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d\t", i);
for (int j = 0; j < numGPUs; j++) {
if (i != j) {
int access;
hipDeviceCanAccessPeer(&access, i, j);
cudaCheckError();
printf("%6d", (access) ? 1 : 0);
}
else {
printf("%6d", 1);
}
}
printf("\n");
}
printf("Unidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
outputBandwidthMatrix(numGPUs, false, P2P_WRITE);
printf("Unidirectional P2P=Enabled Bandwidth (P2P Writes) Matrix (GB/s)\n");
outputBandwidthMatrix(numGPUs, true, P2P_WRITE);
if (p2p_method == P2P_READ)
{
printf("Unidirectional P2P=Enabled Bandwidth (P2P Reads) Matrix (GB/s)\n");
outputBandwidthMatrix(numGPUs, true, p2p_method);
}
printf("Bidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
outputBidirectionalBandwidthMatrix(numGPUs, false);
printf("Bidirectional P2P=Enabled Bandwidth Matrix (GB/s)\n");
outputBidirectionalBandwidthMatrix(numGPUs, true);
printf("P2P=Disabled Latency Matrix (us)\n");
outputLatencyMatrix(numGPUs, false, P2P_WRITE);
printf("P2P=Enabled Latency (P2P Writes) Matrix (us)\n");
outputLatencyMatrix(numGPUs, true, P2P_WRITE);
if (p2p_method == P2P_READ)
{
printf("P2P=Enabled Latency (P2P Reads) Matrix (us)\n");
outputLatencyMatrix(numGPUs, true, p2p_method);
}
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
exit(EXIT_SUCCESS);
}
|
75a3e683d8b2f05b5402e309dadf49cf4755a04c.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cstdio>
#include <vector>
#include <helper_cuda.h>
#include <helper_timer.h>
using namespace std;
const char *sSampleName = "P2P (Peer-to-Peer) GPU Bandwidth Latency Test";
typedef enum
{
P2P_WRITE = 0,
P2P_READ = 1,
}P2PDataTransfer;
//Macro for checking cuda errors following a cuda launch or api call
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
__global__ void delay(volatile int *flag, unsigned long long timeout_ns = 10000000000)
{
// Wait until the application notifies us that it has completed queuing up the
// experiment, or timeout and exit, allowing the application to make progress
register unsigned long long start_time, sample_time;
asm("mov.u64 %0, %%globaltimer;" : "=l"(start_time));
while (!*flag) {
asm("mov.u64 %0, %%globaltimer;" : "=l"(sample_time));
if (sample_time - start_time > timeout_ns) {
break;
}
}
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void)
{
printf("Usage: p2pBandwidthLatencyTest [OPTION]...\n");
printf("Tests bandwidth/latency of GPU pairs using P2P and without P2P\n");
printf("\n");
printf("Options:\n");
printf("--help\t\tDisplay this help menu\n");
printf("--p2p_read\tUse P2P reads for data transfers between GPU pairs and show corresponding results.\n \t\tDefault used is P2P write operation.\n");
}
void checkP2Paccess(int numGPUs)
{
for (int i = 0; i < numGPUs; i++) {
cudaSetDevice(i);
cudaCheckError();
for (int j = 0; j < numGPUs; j++) {
int access;
if (i != j) {
cudaDeviceCanAccessPeer(&access, i, j);
cudaCheckError();
printf("Device=%d %s Access Peer Device=%d\n", i, access ? "CAN" : "CANNOT", j);
}
}
}
printf("\n***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.\nSo you can see lesser Bandwidth (GB/s) and unstable Latency (us) in those cases.\n\n");
}
void outputBandwidthMatrix(int numGPUs, bool p2p, P2PDataTransfer p2p_method)
{
int numElems = 10000000;
int repeat = 5;
volatile int *flag = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs); // buffer for D2D, that is, intra-GPU copy
vector<cudaEvent_t> start(numGPUs);
vector<cudaEvent_t> stop(numGPUs);
vector<cudaStream_t> stream(numGPUs);
cudaHostAlloc((void **)&flag, sizeof(*flag), cudaHostAllocPortable);
cudaCheckError();
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaStreamCreateWithFlags(&stream[d], cudaStreamNonBlocking);
cudaMalloc(&buffers[d], numElems * sizeof(int));
cudaCheckError();
cudaMalloc(&buffersD2D[d], numElems * sizeof(int));
cudaCheckError();
cudaEventCreate(&start[d]);
cudaCheckError();
cudaEventCreate(&stop[d]);
cudaCheckError();
}
vector<double> bandwidthMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
cudaSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access;
if (p2p) {
cudaDeviceCanAccessPeer(&access, i, j);
if (access) {
cudaDeviceEnablePeerAccess(j, 0 );
cudaCheckError();
cudaSetDevice(j);
cudaCheckError();
cudaDeviceEnablePeerAccess(i, 0 );
cudaCheckError();
cudaSetDevice(i);
cudaCheckError();
}
}
cudaStreamSynchronize(stream[i]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - cudaMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repetitions
// relatively low. Higher repetition counts will cause the delay kernel
// to time out and lead to unstable results.
*flag = 0;
delay<<< 1, 1, 0, stream[i]>>>(flag);
cudaCheckError();
cudaEventRecord(start[i], stream[i]);
cudaCheckError();
if (i == j) {
// Perform intra-GPU, D2D copies
for (int r = 0; r < repeat; r++) {
cudaMemcpyPeerAsync(buffers[i], i, buffersD2D[i], i, sizeof(int)*numElems, stream[i]);
}
}
else {
if (p2p_method == P2P_WRITE)
{
for (int r = 0; r < repeat; r++) {
// Perform P2P writes
cudaMemcpyPeerAsync(buffers[j], j, buffers[i], i, sizeof(int)*numElems, stream[i]);
}
}
else
{
for (int r = 0; r < repeat; r++) {
// Perform P2P reads
cudaMemcpyPeerAsync(buffers[i], i, buffers[j], j, sizeof(int)*numElems, stream[i]);
}
}
}
cudaEventRecord(stop[i], stream[i]);
cudaCheckError();
// Release the queued events
*flag = 1;
cudaStreamSynchronize(stream[i]);
cudaCheckError();
float time_ms;
cudaEventElapsedTime(&time_ms, start[i], stop[i]);
double time_s = time_ms / 1e3;
double gb = numElems * sizeof(int) * repeat / (double)1e9;
if (i == j) {
gb *= 2; //must count both the read and the write here
}
bandwidthMatrix[i * numGPUs + j] = gb / time_s;
if (p2p && access) {
cudaDeviceDisablePeerAccess(j);
cudaSetDevice(j);
cudaDeviceDisablePeerAccess(i);
cudaSetDevice(i);
cudaCheckError();
}
}
}
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", bandwidthMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaFree(buffers[d]);
cudaFree(buffersD2D[d]);
cudaCheckError();
cudaEventDestroy(start[d]);
cudaCheckError();
cudaEventDestroy(stop[d]);
cudaCheckError();
cudaStreamDestroy(stream[d]);
cudaCheckError();
}
cudaFreeHost((void *)flag);
cudaCheckError();
}
void outputBidirectionalBandwidthMatrix(int numGPUs, bool p2p)
{
int numElems = 10000000;
int repeat = 5;
volatile int *flag = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs);
vector<cudaEvent_t> start(numGPUs);
vector<cudaEvent_t> stop(numGPUs);
vector<cudaStream_t> stream0(numGPUs);
vector<cudaStream_t> stream1(numGPUs);
cudaHostAlloc((void **)&flag, sizeof(*flag), cudaHostAllocPortable);
cudaCheckError();
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaMalloc(&buffers[d], numElems * sizeof(int));
cudaMalloc(&buffersD2D[d], numElems * sizeof(int));
cudaCheckError();
cudaEventCreate(&start[d]);
cudaCheckError();
cudaEventCreate(&stop[d]);
cudaCheckError();
cudaStreamCreateWithFlags(&stream0[d], cudaStreamNonBlocking);
cudaCheckError();
cudaStreamCreateWithFlags(&stream1[d], cudaStreamNonBlocking);
cudaCheckError();
}
vector<double> bandwidthMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
cudaSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access;
if (p2p) {
cudaDeviceCanAccessPeer(&access, i, j);
if (access) {
cudaSetDevice(i);
cudaDeviceEnablePeerAccess(j, 0);
cudaCheckError();
cudaSetDevice(j);
cudaDeviceEnablePeerAccess(i, 0);
cudaCheckError();
}
}
cudaSetDevice(i);
cudaStreamSynchronize(stream0[i]);
cudaStreamSynchronize(stream1[j]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - cudaMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repetitions
// relatively low. Higher repetition counts will cause the delay kernel
// to time out and lead to unstable results.
*flag = 0;
cudaSetDevice(i);
// No need to block stream1 since it'll be blocked on stream0's event
delay<<< 1, 1, 0, stream0[i]>>>(flag);
cudaCheckError();
// Force stream1 not to start until stream0 does, in order to ensure
// the events on stream0 fully encompass the time needed for all operations
cudaEventRecord(start[i], stream0[i]);
cudaStreamWaitEvent(stream1[j], start[i], 0);
if (i == j) {
// For intra-GPU perform 2 memcopies buffersD2D <-> buffers
for (int r = 0; r < repeat; r++) {
cudaMemcpyPeerAsync(buffers[i], i, buffersD2D[i], i, sizeof(int)*numElems, stream0[i]);
cudaMemcpyPeerAsync(buffersD2D[i], i, buffers[i], i, sizeof(int)*numElems, stream1[i]);
}
}
else {
for (int r = 0; r < repeat; r++) {
cudaMemcpyPeerAsync(buffers[i], i, buffers[j], j, sizeof(int)*numElems, stream1[j]);
cudaMemcpyPeerAsync(buffers[j], j, buffers[i], i, sizeof(int)*numElems, stream0[i]);
}
}
// Notify stream0 that stream1 is complete and record the time of
// the total transaction
cudaEventRecord(stop[j], stream1[j]);
cudaStreamWaitEvent(stream0[i], stop[j], 0);
cudaEventRecord(stop[i], stream0[i]);
// Release the queued operations
*flag = 1;
cudaStreamSynchronize(stream0[i]);
cudaStreamSynchronize(stream1[j]);
cudaCheckError();
float time_ms;
cudaEventElapsedTime(&time_ms, start[i], stop[i]);
double time_s = time_ms / 1e3;
double gb = 2.0 * numElems * sizeof(int) * repeat / (double)1e9;
if (i == j) {
gb *= 2; //must count both the read and the write here
}
bandwidthMatrix[i * numGPUs + j] = gb / time_s;
if (p2p && access) {
cudaSetDevice(i);
cudaDeviceDisablePeerAccess(j);
cudaSetDevice(j);
cudaDeviceDisablePeerAccess(i);
}
}
}
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", bandwidthMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaFree(buffers[d]);
cudaFree(buffersD2D[d]);
cudaCheckError();
cudaEventDestroy(start[d]);
cudaCheckError();
cudaEventDestroy(stop[d]);
cudaCheckError();
cudaStreamDestroy(stream0[d]);
cudaCheckError();
cudaStreamDestroy(stream1[d]);
cudaCheckError();
}
cudaFreeHost((void *)flag);
cudaCheckError();
}
void outputLatencyMatrix(int numGPUs, bool p2p, P2PDataTransfer p2p_method)
{
int repeat = 100;
volatile int *flag = NULL;
StopWatchInterface *stopWatch = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs); // buffer for D2D, that is, intra-GPU copy
vector<cudaStream_t> stream(numGPUs);
vector<cudaEvent_t> start(numGPUs);
vector<cudaEvent_t> stop(numGPUs);
cudaHostAlloc((void **)&flag, sizeof(*flag), cudaHostAllocPortable);
cudaCheckError();
if (!sdkCreateTimer(&stopWatch)) {
printf("Failed to create stop watch\n");
exit(EXIT_FAILURE);
}
sdkStartTimer(&stopWatch);
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaStreamCreateWithFlags(&stream[d], cudaStreamNonBlocking);
cudaMalloc(&buffers[d], 1);
cudaMalloc(&buffersD2D[d], 1);
cudaCheckError();
cudaEventCreate(&start[d]);
cudaCheckError();
cudaEventCreate(&stop[d]);
cudaCheckError();
}
vector<double> gpuLatencyMatrix(numGPUs * numGPUs);
vector<double> cpuLatencyMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
cudaSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access;
if (p2p) {
cudaDeviceCanAccessPeer(&access, i, j);
if (access) {
cudaDeviceEnablePeerAccess(j, 0);
cudaCheckError();
cudaSetDevice(j);
cudaDeviceEnablePeerAccess(i, 0);
cudaSetDevice(i);
cudaCheckError();
}
}
cudaStreamSynchronize(stream[i]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - cudaMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repetitions
// relatively low. Higher repetition counts will cause the delay kernel
// to time out and lead to unstable results.
*flag = 0;
delay<<< 1, 1, 0, stream[i]>>>(flag);
cudaCheckError();
cudaEventRecord(start[i], stream[i]);
sdkResetTimer(&stopWatch);
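// The CPU timer below only measures how long the host takes to enqueue the async copies; the GPU events measure the actual transfer latency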
if (i == j) {
// Perform intra-GPU, D2D copies
for (int r = 0; r < repeat; r++) {
cudaMemcpyPeerAsync(buffers[i], i, buffersD2D[i], i, 1, stream[i]);
}
}
else {
if (p2p_method == P2P_WRITE)
{
for (int r = 0; r < repeat; r++) {
// Perform P2P writes
cudaMemcpyPeerAsync(buffers[j], j, buffers[i], i, 1, stream[i]);
}
}
else
{
for (int r = 0; r < repeat; r++) {
// Perform P2P reads
cudaMemcpyPeerAsync(buffers[i], i, buffers[j], j, 1, stream[i]);
}
}
}
float cpu_time_ms = sdkGetTimerValue(&stopWatch);
cudaEventRecord(stop[i], stream[i]);
// Now that the work has been queued up, release the stream
*flag = 1;
cudaStreamSynchronize(stream[i]);
cudaCheckError();
float gpu_time_ms;
cudaEventElapsedTime(&gpu_time_ms, start[i], stop[i]);
gpuLatencyMatrix[i * numGPUs + j] = gpu_time_ms * 1e3 / repeat;
cpuLatencyMatrix[i * numGPUs + j] = cpu_time_ms * 1e3 / repeat;
if (p2p && access) {
cudaDeviceDisablePeerAccess(j);
cudaSetDevice(j);
cudaDeviceDisablePeerAccess(i);
cudaSetDevice(i);
cudaCheckError();
}
}
}
printf(" GPU");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", gpuLatencyMatrix[i * numGPUs + j]);
}
printf("\n");
}
printf("\n CPU");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", cpuLatencyMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaFree(buffers[d]);
cudaFree(buffersD2D[d]);
cudaCheckError();
cudaEventDestroy(start[d]);
cudaCheckError();
cudaEventDestroy(stop[d]);
cudaCheckError();
cudaStreamDestroy(stream[d]);
cudaCheckError();
}
sdkDeleteTimer(&stopWatch);
cudaFreeHost((void *)flag);
cudaCheckError();
}
int main(int argc, char **argv)
{
int numGPUs;
P2PDataTransfer p2p_method = P2P_WRITE;
cudaGetDeviceCount(&numGPUs);
cudaCheckError();
//process command line args
if (checkCmdLineFlag(argc, (const char**)argv, "help"))
{
printHelp();
return 0;
}
if (checkCmdLineFlag(argc, (const char**)argv, "p2p_read"))
{
p2p_method = P2P_READ;
}
printf("[%s]\n", sSampleName);
//output devices
for (int i = 0; i < numGPUs; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
cudaCheckError();
printf("Device: %d, %s, pciBusID: %x, pciDeviceID: %x, pciDomainID:%x\n", i, prop.name, prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
}
checkP2Paccess(numGPUs);
//Check peer-to-peer connectivity
printf("P2P Connectivity Matrix\n");
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d\t", i);
for (int j = 0; j < numGPUs; j++) {
if (i != j) {
int access;
cudaDeviceCanAccessPeer(&access, i, j);
cudaCheckError();
printf("%6d", (access) ? 1 : 0);
}
else {
printf("%6d", 1);
}
}
printf("\n");
}
printf("Unidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
outputBandwidthMatrix(numGPUs, false, P2P_WRITE);
printf("Unidirectional P2P=Enabled Bandwidth (P2P Writes) Matrix (GB/s)\n");
outputBandwidthMatrix(numGPUs, true, P2P_WRITE);
if (p2p_method == P2P_READ)
{
printf("Unidirectional P2P=Enabled Bandwidth (P2P Reads) Matrix (GB/s)\n");
outputBandwidthMatrix(numGPUs, true, p2p_method);
}
printf("Bidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
outputBidirectionalBandwidthMatrix(numGPUs, false);
printf("Bidirectional P2P=Enabled Bandwidth Matrix (GB/s)\n");
outputBidirectionalBandwidthMatrix(numGPUs, true);
printf("P2P=Disabled Latency Matrix (us)\n");
outputLatencyMatrix(numGPUs, false, P2P_WRITE);
printf("P2P=Enabled Latency (P2P Writes) Matrix (us)\n");
outputLatencyMatrix(numGPUs, true, P2P_WRITE);
if (p2p_method == P2P_READ)
{
printf("P2P=Enabled Latency (P2P Reads) Matrix (us)\n");
outputLatencyMatrix(numGPUs, true, p2p_method);
}
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
exit(EXIT_SUCCESS);
}
|
18ad9b786e95abb96fcb9ced0e16967c57de8b69.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
18ad9b786e95abb96fcb9ced0e16967c57de8b69.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
af0a2672530759bb8d73eef4d18d60f8cd8871f4.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime_api.h>
#include <cuml/experimental/fil/detail/raft_proto/buffer.hpp>
#include <cuml/experimental/fil/detail/raft_proto/cuda_check.hpp>
#include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp>
#include <cuml/experimental/fil/detail/raft_proto/device_type.hpp>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <iostream>
namespace raft_proto {
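// Single-thread kernel used by the test below: if the expected values arrived on the device, overwrite them so the host copy-back can verify device access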
__global__ void check_buffer_access(int* buf)
{
if (buf[0] == 1) { buf[0] = 4; }
if (buf[1] == 2) { buf[1] = 5; }
if (buf[2] == 3) { buf[2] = 6; }
}
TEST(Buffer, device_buffer_access)
{
auto data = std::vector<int>{1, 2, 3};
auto expected = std::vector<int>{4, 5, 6};
auto buf = buffer<int>(
buffer<int>(data.data(), data.size(), device_type::cpu), device_type::gpu, 0, cuda_stream{});
hipLaunchKernelGGL(( check_buffer_access), dim3(1), dim3(1), 0, 0, buf.data());
auto data_out = std::vector<int>(expected.size());
auto host_buf = buffer<int>(data_out.data(), data_out.size(), device_type::cpu);
copy<true>(host_buf, buf);
ASSERT_EQ(hipStreamSynchronize(cuda_stream{}), hipSuccess);
EXPECT_THAT(data_out, testing::ElementsAreArray(expected));
}
} // namespace raft_proto
|
af0a2672530759bb8d73eef4d18d60f8cd8871f4.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime_api.h>
#include <cuml/experimental/fil/detail/raft_proto/buffer.hpp>
#include <cuml/experimental/fil/detail/raft_proto/cuda_check.hpp>
#include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp>
#include <cuml/experimental/fil/detail/raft_proto/device_type.hpp>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <iostream>
namespace raft_proto {
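// Single-thread kernel used by the test below: if the expected values arrived on the device, overwrite them so the host copy-back can verify device access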
__global__ void check_buffer_access(int* buf)
{
if (buf[0] == 1) { buf[0] = 4; }
if (buf[1] == 2) { buf[1] = 5; }
if (buf[2] == 3) { buf[2] = 6; }
}
TEST(Buffer, device_buffer_access)
{
auto data = std::vector<int>{1, 2, 3};
auto expected = std::vector<int>{4, 5, 6};
auto buf = buffer<int>(
buffer<int>(data.data(), data.size(), device_type::cpu), device_type::gpu, 0, cuda_stream{});
check_buffer_access<<<1, 1>>>(buf.data());
auto data_out = std::vector<int>(expected.size());
auto host_buf = buffer<int>(data_out.data(), data_out.size(), device_type::cpu);
copy<true>(host_buf, buf);
ASSERT_EQ(cudaStreamSynchronize(cuda_stream{}), cudaSuccess);
EXPECT_THAT(data_out, testing::ElementsAreArray(expected));
}
} // namespace raft_proto
|
05fedc16c85bb683ca2c90036019eb8acf7b21f8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../inc/yyfnutil.h"
int main(){
hipblasHandle_t cn;
CublasCreate(&cn);
int m = 256, n = 256, k = 128;
float *A = (float*) Malloc(m*k*sizeof(float));
for(int i = 0; i < m; i += 2){
for(int j = 0; j < k; j++){
A[i*k+j] = 1.0f;
}
for(int j = 0; j < k; j++){
A[i*k+k+j] = 0.0f;
}
}
float *b = (float*) Malloc(k*n*sizeof(float));
for(int i = 0; i < k; i++){
for(int j = 0; j < n; j +=2){
b[i*n+j] = 1.0f;
b[i*n+j+1] = 0.0f;
}
}
float *y = (float*) Malloc(m*n*sizeof(float));
float *d_A;
CudaMalloc((void**)&d_A, m*k*sizeof(float));
CudaMemcpy(d_A, A, m*k*sizeof(float), hipMemcpyHostToDevice);
float *d_b;
CudaMalloc((void**)&d_b, k*n*sizeof(float));
hipMemcpy(d_b, b, k*n*sizeof(float), hipMemcpyHostToDevice);
float *d_y;
CudaMalloc((void**)&d_y, m*n*sizeof(float));
float alpha = 1.0f, beta = 0.0f;
CublasSgemm(cn, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alpha, d_A, k, d_b, n, &beta, d_y, n);
CudaDeviceSynchronize();
CudaMemcpy(y, d_y, m*n*sizeof(float), hipMemcpyDeviceToHost);
for(int i = 0; i < m; i++){
printf("%d ", i);
for(int j = 0; j < n; j++)
printf(" %d", (int)y[i*n+j]);
printf("\n\n");
}
free(A);
free(b);
free(y);
CudaFree(d_A);
CudaFree(d_b);
CudaFree(d_y);
CublasDestroy(cn);
return 0;
}
|
05fedc16c85bb683ca2c90036019eb8acf7b21f8.cu
|
#include "../inc/yyfnutil.h"
int main(){
cublasHandle_t cn;
CublasCreate(&cn);
int m = 256, n = 256, k = 128;
float *A = (float*) Malloc(m*k*sizeof(float));
for(int i = 0; i < m; i += 2){
for(int j = 0; j < k; j++){
A[i*k+j] = 1.0f;
}
for(int j = 0; j < k; j++){
A[i*k+k+j] = 0.0f;
}
}
float *b = (float*) Malloc(k*n*sizeof(float));
for(int i = 0; i < k; i++){
for(int j = 0; j < n; j +=2){
b[i*n+j] = 1.0f;
b[i*n+j+1] = 0.0f;
}
}
float *y = (float*) Malloc(m*n*sizeof(float));
float *d_A;
CudaMalloc((void**)&d_A, m*k*sizeof(float));
CudaMemcpy(d_A, A, m*k*sizeof(float), cudaMemcpyHostToDevice);
float *d_b;
CudaMalloc((void**)&d_b, k*n*sizeof(float));
cudaMemcpy(d_b, b, k*n*sizeof(float), cudaMemcpyHostToDevice);
float *d_y;
CudaMalloc((void**)&d_y, m*n*sizeof(float));
float alpha = 1.0f, beta = 0.0f;
CublasSgemm(cn, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, d_A, k, d_b, n, &beta, d_y, n);
CudaDeviceSynchronize();
CudaMemcpy(y, d_y, m*n*sizeof(float), cudaMemcpyDeviceToHost);
for(int i = 0; i < m; i++){
printf("%d ", i);
for(int j = 0; j < n; j++)
printf(" %d", (int)y[i*n+j]);
printf("\n\n");
}
free(A);
free(b);
free(y);
CudaFree(d_A);
CudaFree(d_b);
CudaFree(d_y);
CublasDestroy(cn);
return 0;
}
|
6de7ac5911421f3c79ec40351ebe44cf2a375570.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <random>
#include <iostream>
#include <cmath>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "cpuGroupby.h"
#include "groupby_hash.cuh"
// is there dynamic size constant memory?
__constant__ reductionType ops_c[512];
#include "groupby_hash_templates.cu"
size_t size_alignment(size_t size, size_t alignment)
{
return (size + alignment - 1) / alignment;
}
void groupby_hash_GPU(const int hash_size, const int* key_columns_h, int num_key_columns, int num_key_rows,
const int* value_columns_h, int num_value_columns, int num_value_rows,
reductionType* ops, int num_ops, int* output_keys, int* output_values, int &num_output_rows)
{
#ifdef DEBUG
constexpr unsigned int BLOCKDIM = 512;
#else
constexpr unsigned int BLOCKDIM = 1024;
#endif
unsigned int HASH_TABLE_SIZE = hash_size;
#ifndef TESLA
constexpr unsigned int GRIDDIM = 40;
#else
constexpr unsigned int GRIDDIM = 112;
#endif
using Tval = int; // replace int with actual variable type if needed;
//set restarting flags;
int hashsize_mutiplier = 1;
int* overflow_flag = NULL;
hipMallocManaged(&overflow_flag,sizeof(int));
overflow_flag[0] = 0; // No overflow happens
// variableAllocating
int* key_columns_d = NULL;
int* value_columns_d = NULL;
int* hash_key_idx_d = NULL;
int* hash_count_d = NULL;
int* hash_results_d = NULL;
gpuErrchk(hipMalloc(&key_columns_d, sizeof(int)*num_key_columns*num_key_rows));
gpuErrchk(hipMalloc(&value_columns_d, sizeof(int)*num_value_columns*num_value_rows));
// copy to target
gpuErrchk(hipMemcpy(key_columns_d, key_columns_h, sizeof(int)*num_key_columns*num_key_rows, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(value_columns_d, value_columns_h, sizeof(int)*num_value_columns*num_value_rows, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpyToSymbol(ops_c, ops, sizeof(reductionType) * num_ops));
// sample hash table length
#ifdef CPU_SAMPLE
unsigned int predictedLength = predictTableLength_CPU<int>(key_columns_h,
num_key_rows,
num_key_columns);
std::cout << "Predicted Hash Table Length:" << predictedLength << std::endl;
#elif defined(GPU_SAMPLE)
unsigned int* count = NULL;
hiprandState_t* state = NULL;
gpuErrchk(hipMallocManaged(&count, sizeof(unsigned int)*3));
gpuErrchk(hipMalloc(&state, 1*BLOCKDIM*sizeof(hiprandState_t)));
unsigned int iterations = num_key_rows / BLOCKDIM / 100 + 1;
hipLaunchKernelGGL(( fillCURANDState), dim3(1), dim3(BLOCKDIM), 0, 0, state, gen());
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( predictTableLength_GPU<int>), dim3(1), dim3(BLOCKDIM), 0, 0, key_columns_d,
num_key_rows,
num_key_columns,
iterations,
count,
state);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
unsigned int countTotal = count[0] + count[1] + count[2];
float delta = std::sqrt((float)countTotal*((float)countTotal*9 - (float)count[1]*12));
unsigned int predictedLength = 2.6 * ((3*countTotal + delta) / (2*count[1]));
std::cout << "Predicted Hash Table Length:" << predictedLength << std::endl;
#endif
#ifndef PRIVATIZATION
do {
overflow_flag[0] = 0;
gpuErrchk(hipMalloc(&hash_key_idx_d, sizeof(int)*HASH_TABLE_SIZE*hashsize_mutiplier));
gpuErrchk(hipMalloc(&hash_count_d, sizeof(int)*HASH_TABLE_SIZE*hashsize_mutiplier));
gpuErrchk(hipMalloc(&hash_results_d, sizeof(int)*HASH_TABLE_SIZE*num_ops*hashsize_mutiplier));
gpuErrchk(hipMemcpy(key_columns_d, key_columns_h, sizeof(int)*num_key_columns*num_key_rows, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(value_columns_d, value_columns_h, sizeof(int)*num_value_columns*num_value_rows, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( initializeVariable<int>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, hash_key_idx_d, hash_count_d, hash_results_d, HASH_TABLE_SIZE*hashsize_mutiplier, num_ops);
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( fillTable<int, int>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, key_columns_d, num_key_rows, num_key_columns,
value_columns_d, num_value_rows, num_value_columns,
hash_key_idx_d, hash_count_d, hash_results_d,
HASH_TABLE_SIZE*hashsize_mutiplier, num_ops, overflow_flag);
gpuErrchk(hipDeviceSynchronize());
printf("The overflow_flag is: %d\n", overflow_flag[0]);
printf("Current hash size is: %d\n", hashsize_mutiplier*HASH_TABLE_SIZE);
if (overflow_flag[0] == 1) {
hashsize_mutiplier *= 3;
hipFree(hash_key_idx_d);
hipFree(hash_count_d);
hipFree(hash_results_d);
hash_key_idx_d = NULL;
hash_count_d = NULL;
hash_results_d = NULL;
}
} while(overflow_flag[0] == 1);
//printf("The overflow_flag is: %d\n", overflow_flag[0]);
#else
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
size_t sharedMemPerBlock = deviceProp.sharedMemPerBlock;
printf("Total amount of sharedmemory per block %u\n", sharedMemPerBlock);
# ifdef TESLA
sharedMemPerBlock = 32 * 1024;
# endif
size_t max_capacity = sharedMemPerBlock - 48; // for some reason 48 is required for reserved variable
size_t s_len_table = max_capacity / (2*sizeof(int) + sizeof(Tval)*num_ops);
size_t sharedMemorySize = 0;
while (true) { // calculate the suitable length of shared memory table
sharedMemorySize = size_alignment(2*sizeof(int)*s_len_table, sizeof(Tval)) * sizeof(int);
sharedMemorySize += sizeof(Tval)*num_ops*s_len_table;
if (sharedMemorySize < max_capacity)
if (s_len_table % 2 == 1) break; // always make length an odd number to avoid serious collision
--s_len_table;
}
printf("Length of Shared Table: %u\n", s_len_table);
printf("Total extern shared memory: %u\n", sharedMemorySize);
hipLaunchKernelGGL(( fillTable_privatization
<int, int>), dim3(GRIDDIM), dim3(BLOCKDIM), sharedMemorySize, 0, key_columns_d, num_key_rows,
num_key_columns, value_columns_d,
num_value_rows, num_value_columns,
hash_key_idx_d, hash_count_d,
hash_results_d, HASH_TABLE_SIZE*hashsize_mutiplier,
s_len_table, num_ops);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
//shrink the hash table to output array
//Create array of indices for hash table
int *seq, *hashTable_idxs;
int hash_table_size_fixed = HASH_TABLE_SIZE*hashsize_mutiplier;
hipMalloc((void**)&seq, HASH_TABLE_SIZE*hashsize_mutiplier*sizeof(int)); //for hash index sequence
hipMalloc((void**)&hashTable_idxs, HASH_TABLE_SIZE*hashsize_mutiplier*sizeof(int)); //for key indexs without -1
thrust::device_ptr<int> hash_d_seq = thrust::device_pointer_cast(seq); //for hash index sequence
  thrust::device_ptr<int> hashTable_idxs_d = thrust::device_pointer_cast(hashTable_idxs); //for key indices without -1
thrust::sequence(thrust::device, hash_d_seq, hash_d_seq + hash_table_size_fixed); //fill hash index seq
  //copy hash index of keys, removing -1's which signify unused slots
// copy_if(policy, index seq start, index seq end, hash keys for comparison, result containing idx to non -1's, comparator)
auto newEnd = thrust::copy_if(thrust::device, hash_d_seq, hash_d_seq + hash_table_size_fixed, hash_key_idx_d, hashTable_idxs_d, is_pos());
num_output_rows = newEnd - hashTable_idxs_d;
printf("%d output rows!\n", num_output_rows);
printf("%d hash length!\n", HASH_TABLE_SIZE*hashsize_mutiplier);
int* output_key_columns_d = NULL;
hipMalloc(&output_key_columns_d, sizeof(int)*num_key_columns*num_output_rows);
hipLaunchKernelGGL(( copyUnique<int>), dim3(GRIDDIM),dim3(BLOCKDIM), 0, 0, hashTable_idxs, hash_key_idx_d,key_columns_d, output_key_columns_d, num_output_rows, num_key_columns, num_key_rows);
//gpuErrchk(hipDeviceSynchronize());
int* output_value_columns_d = NULL;
gpuErrchk(hipMalloc(&output_value_columns_d, sizeof(int)*num_value_columns*num_output_rows));
hipLaunchKernelGGL(( copyValues<int>), dim3(GRIDDIM),dim3(BLOCKDIM), 0, 0, hashTable_idxs, hash_results_d,hash_count_d, value_columns_d, output_value_columns_d, num_output_rows, num_value_columns, num_value_rows, num_ops, hash_table_size_fixed);
printf("%d,%d\n",BLOCKDIM,GRIDDIM);
printf("waiting for Sync\n");
gpuErrchk(hipDeviceSynchronize());
// copy back
gpuErrchk(hipMemcpy(output_keys,output_key_columns_d,sizeof(int)*num_key_columns*num_output_rows,hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(output_values,output_value_columns_d,sizeof(int)*num_value_columns*num_output_rows,hipMemcpyDeviceToHost));
// free elements
hipFree(key_columns_d);
hipFree(value_columns_d);
hipFree(hash_key_idx_d);
hipFree(hash_count_d);
hipFree(hash_results_d);
hipFree(output_key_columns_d);
hipFree(output_value_columns_d);
hipFree(seq);
hipFree(hashTable_idxs);
}
|
6de7ac5911421f3c79ec40351ebe44cf2a375570.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <random>
#include <iostream>
#include <cmath>
#include <curand.h>
#include <curand_kernel.h>
#include "cpuGroupby.h"
#include "groupby_hash.cuh"
// is there dynamic size constant memory?
__constant__ reductionType ops_c[512];
#include "groupby_hash_templates.cu"
size_t size_alignment(size_t size, size_t alignment)
{
return (size + alignment - 1) / alignment;
}
void groupby_hash_GPU(const int hash_size, const int* key_columns_h, int num_key_columns, int num_key_rows,
const int* value_columns_h, int num_value_columns, int num_value_rows,
reductionType* ops, int num_ops, int* output_keys, int* output_values, int &num_output_rows)
{
#ifdef DEBUG
constexpr unsigned int BLOCKDIM = 512;
#else
constexpr unsigned int BLOCKDIM = 1024;
#endif
unsigned int HASH_TABLE_SIZE = hash_size;
#ifndef TESLA
constexpr unsigned int GRIDDIM = 40;
#else
constexpr unsigned int GRIDDIM = 112;
#endif
using Tval = int; // replace int with actual variable type if needed;
//set restarting flags;
int hashsize_mutiplier = 1;
int* overflow_flag = NULL;
cudaMallocManaged(&overflow_flag,sizeof(int));
overflow_flag[0] = 0; // No overflow happens
// variableAllocating
int* key_columns_d = NULL;
int* value_columns_d = NULL;
int* hash_key_idx_d = NULL;
int* hash_count_d = NULL;
int* hash_results_d = NULL;
gpuErrchk(cudaMalloc(&key_columns_d, sizeof(int)*num_key_columns*num_key_rows));
gpuErrchk(cudaMalloc(&value_columns_d, sizeof(int)*num_value_columns*num_value_rows));
// copy to target
gpuErrchk(cudaMemcpy(key_columns_d, key_columns_h, sizeof(int)*num_key_columns*num_key_rows, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(value_columns_d, value_columns_h, sizeof(int)*num_value_columns*num_value_rows, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpyToSymbol(ops_c, ops, sizeof(reductionType) * num_ops));
// sample hash table length
#ifdef CPU_SAMPLE
unsigned int predictedLength = predictTableLength_CPU<int>(key_columns_h,
num_key_rows,
num_key_columns);
std::cout << "Predicted Hash Table Length:" << predictedLength << std::endl;
#elif defined(GPU_SAMPLE)
unsigned int* count = NULL;
curandState* state = NULL;
gpuErrchk(cudaMallocManaged(&count, sizeof(unsigned int)*3));
gpuErrchk(cudaMalloc(&state, 1*BLOCKDIM*sizeof(curandState)));
unsigned int iterations = num_key_rows / BLOCKDIM / 100 + 1;
fillCURANDState<<<1, BLOCKDIM>>>(state, gen());
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
predictTableLength_GPU<int><<<1, BLOCKDIM>>>(key_columns_d,
num_key_rows,
num_key_columns,
iterations,
count,
state);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
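  // Assumed interpretation (not verified): count[] holds sampled key-match statistics and the
  // quadratic solved below back-out an estimate of the number of distinct keys, padded by a
  // 2.6x safety factor before being reported as the suggested hash table length.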
unsigned int countTotal = count[0] + count[1] + count[2];
float delta = std::sqrt((float)countTotal*((float)countTotal*9 - (float)count[1]*12));
unsigned int predictedLength = 2.6 * ((3*countTotal + delta) / (2*count[1]));
std::cout << "Predicted Hash Table Length:" << predictedLength << std::endl;
#endif
#ifndef PRIVATIZATION
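  // Build the hash table, and whenever fillTable reports an overflow free it and retry from
  // scratch with a 3x larger table until the insert succeeds.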
do {
overflow_flag[0] = 0;
gpuErrchk(cudaMalloc(&hash_key_idx_d, sizeof(int)*HASH_TABLE_SIZE*hashsize_mutiplier));
gpuErrchk(cudaMalloc(&hash_count_d, sizeof(int)*HASH_TABLE_SIZE*hashsize_mutiplier));
gpuErrchk(cudaMalloc(&hash_results_d, sizeof(int)*HASH_TABLE_SIZE*num_ops*hashsize_mutiplier));
gpuErrchk(cudaMemcpy(key_columns_d, key_columns_h, sizeof(int)*num_key_columns*num_key_rows, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(value_columns_d, value_columns_h, sizeof(int)*num_value_columns*num_value_rows, cudaMemcpyHostToDevice));
initializeVariable<int><<<GRIDDIM, BLOCKDIM>>>(hash_key_idx_d, hash_count_d, hash_results_d, HASH_TABLE_SIZE*hashsize_mutiplier, num_ops);
gpuErrchk(cudaDeviceSynchronize());
fillTable<int, int><<<GRIDDIM, BLOCKDIM>>>(key_columns_d, num_key_rows, num_key_columns,
value_columns_d, num_value_rows, num_value_columns,
hash_key_idx_d, hash_count_d, hash_results_d,
HASH_TABLE_SIZE*hashsize_mutiplier, num_ops, overflow_flag);
gpuErrchk(cudaDeviceSynchronize());
printf("The overflow_flag is: %d\n", overflow_flag[0]);
printf("Current hash size is: %d\n", hashsize_mutiplier*HASH_TABLE_SIZE);
if (overflow_flag[0] == 1) {
hashsize_mutiplier *= 3;
cudaFree(hash_key_idx_d);
cudaFree(hash_count_d);
cudaFree(hash_results_d);
hash_key_idx_d = NULL;
hash_count_d = NULL;
hash_results_d = NULL;
}
} while(overflow_flag[0] == 1);
//printf("The overflow_flag is: %d\n", overflow_flag[0]);
#else
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
size_t sharedMemPerBlock = deviceProp.sharedMemPerBlock;
printf("Total amount of sharedmemory per block %u\n", sharedMemPerBlock);
# ifdef TESLA
sharedMemPerBlock = 32 * 1024;
# endif
size_t max_capacity = sharedMemPerBlock - 48; // for some reason 48 is required for reserved variable
size_t s_len_table = max_capacity / (2*sizeof(int) + sizeof(Tval)*num_ops);
size_t sharedMemorySize = 0;
while (true) { // calculate the suitable length of shared memory table
sharedMemorySize = size_alignment(2*sizeof(int)*s_len_table, sizeof(Tval)) * sizeof(int);
sharedMemorySize += sizeof(Tval)*num_ops*s_len_table;
if (sharedMemorySize < max_capacity)
if (s_len_table % 2 == 1) break; // always make length an odd number to avoid serious collision
--s_len_table;
}
printf("Length of Shared Table: %u\n", s_len_table);
printf("Total extern shared memory: %u\n", sharedMemorySize);
fillTable_privatization
<int, int><<<GRIDDIM, BLOCKDIM, sharedMemorySize>>>(key_columns_d, num_key_rows,
num_key_columns, value_columns_d,
num_value_rows, num_value_columns,
hash_key_idx_d, hash_count_d,
hash_results_d, HASH_TABLE_SIZE*hashsize_mutiplier,
s_len_table, num_ops);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
//shrink the hash table to output array
  //Create array of indices for hash table
int *seq, *hashTable_idxs;
int hash_table_size_fixed = HASH_TABLE_SIZE*hashsize_mutiplier;
cudaMalloc((void**)&seq, HASH_TABLE_SIZE*hashsize_mutiplier*sizeof(int)); //for hash index sequence
cudaMalloc((void**)&hashTable_idxs, HASH_TABLE_SIZE*hashsize_mutiplier*sizeof(int)); //for key indexs without -1
thrust::device_ptr<int> hash_d_seq = thrust::device_pointer_cast(seq); //for hash index sequence
  thrust::device_ptr<int> hashTable_idxs_d = thrust::device_pointer_cast(hashTable_idxs); //for key indices without -1
thrust::sequence(thrust::device, hash_d_seq, hash_d_seq + hash_table_size_fixed); //fill hash index seq
  //copy hash index of keys, removing -1's which signify unused slots
// copy_if(policy, index seq start, index seq end, hash keys for comparison, result containing idx to non -1's, comparator)
auto newEnd = thrust::copy_if(thrust::device, hash_d_seq, hash_d_seq + hash_table_size_fixed, hash_key_idx_d, hashTable_idxs_d, is_pos());
num_output_rows = newEnd - hashTable_idxs_d;
printf("%d output rows!\n", num_output_rows);
printf("%d hash length!\n", HASH_TABLE_SIZE*hashsize_mutiplier);
int* output_key_columns_d = NULL;
cudaMalloc(&output_key_columns_d, sizeof(int)*num_key_columns*num_output_rows);
copyUnique<int><<<GRIDDIM,BLOCKDIM>>>(hashTable_idxs, hash_key_idx_d,key_columns_d, output_key_columns_d, num_output_rows, num_key_columns, num_key_rows);
//gpuErrchk(cudaDeviceSynchronize());
int* output_value_columns_d = NULL;
gpuErrchk(cudaMalloc(&output_value_columns_d, sizeof(int)*num_value_columns*num_output_rows));
copyValues<int><<<GRIDDIM,BLOCKDIM>>>(hashTable_idxs, hash_results_d,hash_count_d, value_columns_d, output_value_columns_d, num_output_rows, num_value_columns, num_value_rows, num_ops, hash_table_size_fixed);
printf("%d,%d\n",BLOCKDIM,GRIDDIM);
printf("waiting for Sync\n");
gpuErrchk(cudaDeviceSynchronize());
// copy back
gpuErrchk(cudaMemcpy(output_keys,output_key_columns_d,sizeof(int)*num_key_columns*num_output_rows,cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(output_values,output_value_columns_d,sizeof(int)*num_value_columns*num_output_rows,cudaMemcpyDeviceToHost));
// free elements
cudaFree(key_columns_d);
cudaFree(value_columns_d);
cudaFree(hash_key_idx_d);
cudaFree(hash_count_d);
cudaFree(hash_results_d);
cudaFree(output_key_columns_d);
cudaFree(output_value_columns_d);
cudaFree(seq);
cudaFree(hashTable_idxs);
}
|
adjacent_difference.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/adjacent_difference.h>
#include <thrust/execution_policy.h>
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2>
__global__ void adjacent_difference_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result)
{
thrust::adjacent_difference(exec, first, last, result);
}
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename BinaryFunction>
__global__ void adjacent_difference_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result, BinaryFunction f)
{
thrust::adjacent_difference(exec, first, last, result, f);
}
template<typename T, typename ExecutionPolicy>
void TestAdjacentDifferenceDevice(ExecutionPolicy exec, const size_t n)
{
thrust::host_vector<T> h_input = unittest::random_samples<T>(n);
thrust::device_vector<T> d_input = h_input;
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::adjacent_difference(h_input.begin(), h_input.end(), h_output.begin());
hipLaunchKernelGGL(( adjacent_difference_kernel), dim3(1),dim3(1), 0, 0, exec, d_input.begin(), d_input.end(), d_output.begin());
{
hipError_t const err = hipDeviceSynchronize();
ASSERT_EQUAL(hipSuccess, err);
}
ASSERT_EQUAL(h_output, d_output);
thrust::adjacent_difference(h_input.begin(), h_input.end(), h_output.begin(), thrust::plus<T>());
hipLaunchKernelGGL(( adjacent_difference_kernel), dim3(1),dim3(1), 0, 0, exec, d_input.begin(), d_input.end(), d_output.begin(), thrust::plus<T>());
{
hipError_t const err = hipDeviceSynchronize();
ASSERT_EQUAL(hipSuccess, err);
}
ASSERT_EQUAL(h_output, d_output);
// in-place operation
thrust::adjacent_difference(h_input.begin(), h_input.end(), h_input.begin(), thrust::plus<T>());
hipLaunchKernelGGL(( adjacent_difference_kernel), dim3(1),dim3(1), 0, 0, exec, d_input.begin(), d_input.end(), d_input.begin(), thrust::plus<T>());
{
hipError_t const err = hipDeviceSynchronize();
ASSERT_EQUAL(hipSuccess, err);
}
ASSERT_EQUAL(h_input, h_output); //computed previously
ASSERT_EQUAL(d_input, d_output); //computed previously
}
template<typename T>
void TestAdjacentDifferenceDeviceSeq(const size_t n)
{
TestAdjacentDifferenceDevice<T>(thrust::seq, n);
}
DECLARE_VARIABLE_UNITTEST(TestAdjacentDifferenceDeviceSeq);
template<typename T>
void TestAdjacentDifferenceDeviceDevice(const size_t n)
{
TestAdjacentDifferenceDevice<T>(thrust::device, n);
}
DECLARE_VARIABLE_UNITTEST(TestAdjacentDifferenceDeviceDevice);
void TestAdjacentDifferenceCudaStreams()
{
hipStream_t s;
hipStreamCreate(&s);
thrust::device_vector<int> input(3);
thrust::device_vector<int> output(3);
input[0] = 1; input[1] = 4; input[2] = 6;
thrust::adjacent_difference(thrust::hip::par.on(s), input.begin(), input.end(), output.begin());
hipStreamSynchronize(s);
ASSERT_EQUAL(output[0], 1);
ASSERT_EQUAL(output[1], 3);
ASSERT_EQUAL(output[2], 2);
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestAdjacentDifferenceCudaStreams);
|
adjacent_difference.cu
|
#include <unittest/unittest.h>
#include <thrust/adjacent_difference.h>
#include <thrust/execution_policy.h>
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2>
__global__ void adjacent_difference_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result)
{
thrust::adjacent_difference(exec, first, last, result);
}
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename BinaryFunction>
__global__ void adjacent_difference_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result, BinaryFunction f)
{
thrust::adjacent_difference(exec, first, last, result, f);
}
template<typename T, typename ExecutionPolicy>
void TestAdjacentDifferenceDevice(ExecutionPolicy exec, const size_t n)
{
thrust::host_vector<T> h_input = unittest::random_samples<T>(n);
thrust::device_vector<T> d_input = h_input;
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::adjacent_difference(h_input.begin(), h_input.end(), h_output.begin());
adjacent_difference_kernel<<<1,1>>>(exec, d_input.begin(), d_input.end(), d_output.begin());
{
cudaError_t const err = cudaDeviceSynchronize();
ASSERT_EQUAL(cudaSuccess, err);
}
ASSERT_EQUAL(h_output, d_output);
thrust::adjacent_difference(h_input.begin(), h_input.end(), h_output.begin(), thrust::plus<T>());
adjacent_difference_kernel<<<1,1>>>(exec, d_input.begin(), d_input.end(), d_output.begin(), thrust::plus<T>());
{
cudaError_t const err = cudaDeviceSynchronize();
ASSERT_EQUAL(cudaSuccess, err);
}
ASSERT_EQUAL(h_output, d_output);
// in-place operation
thrust::adjacent_difference(h_input.begin(), h_input.end(), h_input.begin(), thrust::plus<T>());
adjacent_difference_kernel<<<1,1>>>(exec, d_input.begin(), d_input.end(), d_input.begin(), thrust::plus<T>());
{
cudaError_t const err = cudaDeviceSynchronize();
ASSERT_EQUAL(cudaSuccess, err);
}
ASSERT_EQUAL(h_input, h_output); //computed previously
ASSERT_EQUAL(d_input, d_output); //computed previously
}
template<typename T>
void TestAdjacentDifferenceDeviceSeq(const size_t n)
{
TestAdjacentDifferenceDevice<T>(thrust::seq, n);
}
DECLARE_VARIABLE_UNITTEST(TestAdjacentDifferenceDeviceSeq);
template<typename T>
void TestAdjacentDifferenceDeviceDevice(const size_t n)
{
TestAdjacentDifferenceDevice<T>(thrust::device, n);
}
DECLARE_VARIABLE_UNITTEST(TestAdjacentDifferenceDeviceDevice);
void TestAdjacentDifferenceCudaStreams()
{
cudaStream_t s;
cudaStreamCreate(&s);
thrust::device_vector<int> input(3);
thrust::device_vector<int> output(3);
input[0] = 1; input[1] = 4; input[2] = 6;
thrust::adjacent_difference(thrust::cuda::par.on(s), input.begin(), input.end(), output.begin());
cudaStreamSynchronize(s);
ASSERT_EQUAL(output[0], 1);
ASSERT_EQUAL(output[1], 3);
ASSERT_EQUAL(output[2], 2);
cudaStreamDestroy(s);
}
DECLARE_UNITTEST(TestAdjacentDifferenceCudaStreams);
|
35ed6c62169662d972e0425015587e1c722fc855.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/Dispatch.h"
#include "ATen/NativeFunctions.h"
#include "ATen/hip/AccumulateType.h"
#include "ATen/hip/HIPTensorMethods.cuh"
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHNumerics.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
static const int WARP_SIZE = 32;
__device__ __forceinline__ bool warp_has_collision(int val) {
// Compare our value to the values stored in the next 16 lanes,
  // wrapping around at 32. If any pair of values is the same, then
// there is a collision in the warp.
bool dup = 0;
const int laneId = threadIdx.x % 32;
#pragma unroll
for (int i = 1; i <= 16; i++) {
dup |= (WARP_SHFL(val, (laneId + i) % 32) == val);
}
return __any(dup) != 0;
}
// parallelizes over features
template <typename scalar_t>
__global__ void embedding_backward_feature_kernel(
int64_t* indices, scalar_t* grad, scalar_t* grad_weight,
int64_t num_indices, int64_t stride, int padding_idx) {
const int feature_dim = blockIdx.x * 4 + threadIdx.x / 32;
if (feature_dim >= stride) {
return;
}
// The strategy here is that each warp handles a single feature
// dimension.
// Within that feature dimension, points in the [batch][element]
// dimension can overlap, and we need to determine if threads want
// to add to the gradient in a colliding manner.
// Typically one would use floating-point atomicAdd() to resolve
// these collisions, but that is non-deterministic if there are
// collisions. Non-determinism for this code is really bad,
// especially in RNNs, and is prone to snowballing error.
// In order to get a deterministic order of execution, we handle
// non-colliding updates separately from colliding ones. Colliding
// updates are serialized in their order of execution by using the
// warp-wide collision detector `warp_has_collision`.
const int laneId = threadIdx.x % 32;
for (int64_t i = laneId; i < num_indices; i += WARP_SIZE) {
const int weight_index = (int)indices[i];
if (weight_index == padding_idx) {
continue;
}
auto value = grad[i * stride + feature_dim];
// FIXME: should we accumulate as accreal?
// Check for collision
if (warp_has_collision(weight_index)) {
// Run all lanes sequentially; warp divergence
for (int i = 0; i < WARP_SIZE; ++i) {
if (laneId == i) {
grad_weight[weight_index * stride + feature_dim] += value;
}
}
} else {
// No collision; warp coherence
grad_weight[weight_index * stride + feature_dim] += value;
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = cuda::acc_type<scalar_t>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
  // Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = scalar_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = scalar_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = scalar_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int dim) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * dim;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = scalar_cast<accscalar_t>(weights[base_index + i]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += ::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t, accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = ::pow(v, scalar_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = scalar_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkContiguous("embedding_backward", indices_arg);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
auto grad_weight = grad_.type().zeros({num_weights, grad_.size(-1)});
int64_t stride = grad_weight.stride(0);
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
if (num_indices <= 768 && !scale_grad_by_freq) {
dim3 grid(THCCeilDiv(stride, (int64_t) 4));
dim3 block(128);
DISPATCH_ALL_FLOATING_TYPES(grad.type(), "embedding_backward", [&]() {
hipLaunchKernelGGL(( embedding_backward_feature_kernel), dim3(grid), dim3(block), 0, stream,
indices.data<int64_t>(),
grad.data<scalar_t>(),
grad_weight.data<scalar_t>(),
num_indices,
stride,
padding_idx);
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
auto sorted_indices = indices.type().tensor(indices.sizes());
auto orig_indices = indices.type().tensor(indices.sizes());
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = indices.type().tensor(indices.sizes());
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
DISPATCH_ALL_FLOATING_TYPES(grad.type(), "embedding_backward", [&]() {
hipLaunchKernelGGL(( embedding_backward_kernel), dim3(grid), dim3(block), 0, stream,
sorted_indices.data<int64_t>(),
orig_indices.data<int64_t>(),
grad.data<scalar_t>(),
grad_weight.data<scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
num_indices,
stride,
padding_idx);
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkContiguous("embedding_renorm_", self_arg);
checkContiguous("embedding_renorm", indices_arg);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_data = device_ptr(indices.data<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contains duplicates which are not
// adjacent
auto unique_indices = indices.type().tensor(indices.numel());
auto unique_data = device_ptr(unique_indices.data<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
DISPATCH_ALL_FLOATING_TYPES(self.type(), "embedding_backward", [&]() {
using accscalar_t = cuda::acc_type<scalar_t>;
hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream,
self.data<scalar_t>(),
unique_indices.data<int64_t>(),
scalar_cast<accscalar_t>(max_norm),
scalar_cast<accscalar_t>(norm_type),
dim);
});
THCudaCheck(hipGetLastError());
return self;
}
}} // namespace at::native
|
35ed6c62169662d972e0425015587e1c722fc855.cu
|
#include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/Dispatch.h"
#include "ATen/NativeFunctions.h"
#include "ATen/cuda/AccumulateType.h"
#include "ATen/cuda/CUDATensorMethods.cuh"
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCNumerics.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
static const int WARP_SIZE = 32;
__device__ __forceinline__ bool warp_has_collision(int val) {
// Compare our value to the values stored in the next 16 lanes,
  // wrapping around at 32. If any pair of values is the same, then
// there is a collision in the warp.
bool dup = 0;
const int laneId = threadIdx.x % 32;
#pragma unroll
for (int i = 1; i <= 16; i++) {
dup |= (WARP_SHFL(val, (laneId + i) % 32) == val);
}
return __any(dup) != 0;
}
// parallelizes over features
template <typename scalar_t>
__global__ void embedding_backward_feature_kernel(
int64_t* indices, scalar_t* grad, scalar_t* grad_weight,
int64_t num_indices, int64_t stride, int padding_idx) {
const int feature_dim = blockIdx.x * 4 + threadIdx.x / 32;
if (feature_dim >= stride) {
return;
}
// The strategy here is that each warp handles a single feature
// dimension.
// Within that feature dimension, points in the [batch][element]
// dimension can overlap, and we need to determine if threads want
// to add to the gradient in a colliding manner.
// Typically one would use floating-point atomicAdd() to resolve
// these collisions, but that is non-deterministic if there are
// collisions. Non-determinism for this code is really bad,
// especially in RNNs, and is prone to snowballing error.
// In order to get a deterministic order of execution, we handle
// non-colliding updates separately from colliding ones. Colliding
// updates are serialized in their order of execution by using the
// warp-wide collision detector `warp_has_collision`.
const int laneId = threadIdx.x % 32;
for (int64_t i = laneId; i < num_indices; i += WARP_SIZE) {
const int weight_index = (int)indices[i];
if (weight_index == padding_idx) {
continue;
}
auto value = grad[i * stride + feature_dim];
// FIXME: should we accumulate as accreal?
// Check for collision
if (warp_has_collision(weight_index)) {
// Run all lanes sequentially; warp divergence
for (int i = 0; i < WARP_SIZE; ++i) {
if (laneId == i) {
grad_weight[weight_index * stride + feature_dim] += value;
}
}
} else {
// No collision; warp coherence
grad_weight[weight_index * stride + feature_dim] += value;
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = cuda::acc_type<scalar_t>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
  // If the preceding input has the same value as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
  // Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = scalar_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = scalar_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = scalar_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int dim) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * dim;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = scalar_cast<accscalar_t>(weights[base_index + i]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += std::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t, accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = std::pow(v, scalar_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = scalar_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkContiguous("embedding_backward", indices_arg);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
auto grad_weight = grad_.type().zeros({num_weights, grad_.size(-1)});
int64_t stride = grad_weight.stride(0);
cudaStream_t stream = globalContext().getCurrentCUDAStream();
if (num_indices <= 768 && !scale_grad_by_freq) {
dim3 grid(THCCeilDiv(stride, (int64_t) 4));
dim3 block(128);
DISPATCH_ALL_FLOATING_TYPES(grad.type(), "embedding_backward", [&]() {
embedding_backward_feature_kernel<<<grid, block, 0, stream>>>(
indices.data<int64_t>(),
grad.data<scalar_t>(),
grad_weight.data<scalar_t>(),
num_indices,
stride,
padding_idx);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
auto sorted_indices = indices.type().tensor(indices.sizes());
auto orig_indices = indices.type().tensor(indices.sizes());
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = indices.type().tensor(indices.sizes());
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
DISPATCH_ALL_FLOATING_TYPES(grad.type(), "embedding_backward", [&]() {
embedding_backward_kernel<<<grid, block, 0, stream>>>(
sorted_indices.data<int64_t>(),
orig_indices.data<int64_t>(),
grad.data<scalar_t>(),
grad_weight.data<scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
num_indices,
stride,
padding_idx);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkContiguous("embedding_renorm_", self_arg);
checkContiguous("embedding_renorm", indices_arg);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
cudaStream_t stream = globalContext().getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_data = device_ptr(indices.data<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contains duplicates which are not
// adjacent
auto unique_indices = indices.type().tensor(indices.numel());
auto unique_data = device_ptr(unique_indices.data<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
DISPATCH_ALL_FLOATING_TYPES(self.type(), "embedding_backward", [&]() {
using accscalar_t = cuda::acc_type<scalar_t>;
renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>(
self.data<scalar_t>(),
unique_indices.data<int64_t>(),
scalar_cast<accscalar_t>(max_norm),
scalar_cast<accscalar_t>(norm_type),
dim);
});
THCudaCheck(cudaGetLastError());
return self;
}
}} // namespace at::native
|
027afdf3276e35b301a0e98e5346f8e7ff9cb6a0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pytorch_cuda_helper.hpp"
#include "masked_conv2d_cuda_kernel.cuh"
void MaskedIm2colForwardCUDAKernelLauncher(const Tensor bottom_data,
const Tensor mask_h_idx,
const Tensor mask_w_idx,
Tensor top_data, const int kernel_h,
const int kernel_w, const int pad_h,
const int pad_w) {
int channels = bottom_data.size(1);
int height = bottom_data.size(2);
int width = bottom_data.size(3);
int mask_cnt = mask_h_idx.size(0);
int output_size = mask_cnt * channels;
at::hip::HIPGuardMasqueradingAsCUDA device_guard(bottom_data.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bottom_data.scalar_type(), "MaskedIm2colLaucherForward", ([&] {
const scalar_t *bottom_data_ = bottom_data.data_ptr<scalar_t>();
const int64_t *mask_h_idx_ = mask_h_idx.data_ptr<int64_t>();
const int64_t *mask_w_idx_ = mask_w_idx.data_ptr<int64_t>();
scalar_t *top_data_ = top_data.data_ptr<scalar_t>();
hipLaunchKernelGGL(( MaskedIm2colForward<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, bottom_data_, height, width, kernel_h, kernel_w,
pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_);
}));
AT_CUDA_CHECK(hipGetLastError());
}
void MaskedCol2imForwardCUDAKernelLauncher(
const Tensor bottom_data, const Tensor mask_h_idx, const Tensor mask_w_idx,
Tensor top_data, const int height, const int width, const int channels) {
int mask_cnt = mask_h_idx.size(0);
int output_size = mask_cnt * channels;
at::hip::HIPGuardMasqueradingAsCUDA device_guard(bottom_data.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bottom_data.scalar_type(), "MaskedCol2imLaucherForward", ([&] {
const scalar_t *bottom_data_ = bottom_data.data_ptr<scalar_t>();
const int64_t *mask_h_idx_ = mask_h_idx.data_ptr<int64_t>();
const int64_t *mask_w_idx_ = mask_w_idx.data_ptr<int64_t>();
scalar_t *top_data_ = top_data.data_ptr<scalar_t>();
hipLaunchKernelGGL(( MaskedCol2imForward<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, bottom_data_, height, width, channels, mask_h_idx_,
mask_w_idx_, mask_cnt, top_data_);
}));
AT_CUDA_CHECK(hipGetLastError());
}
|
027afdf3276e35b301a0e98e5346f8e7ff9cb6a0.cu
|
#include "pytorch_cuda_helper.hpp"
#include "masked_conv2d_cuda_kernel.cuh"
void MaskedIm2colForwardCUDAKernelLauncher(const Tensor bottom_data,
const Tensor mask_h_idx,
const Tensor mask_w_idx,
Tensor top_data, const int kernel_h,
const int kernel_w, const int pad_h,
const int pad_w) {
int channels = bottom_data.size(1);
int height = bottom_data.size(2);
int width = bottom_data.size(3);
int mask_cnt = mask_h_idx.size(0);
int output_size = mask_cnt * channels;
at::cuda::CUDAGuard device_guard(bottom_data.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bottom_data.scalar_type(), "MaskedIm2colLaucherForward", ([&] {
const scalar_t *bottom_data_ = bottom_data.data_ptr<scalar_t>();
const int64_t *mask_h_idx_ = mask_h_idx.data_ptr<int64_t>();
const int64_t *mask_w_idx_ = mask_w_idx.data_ptr<int64_t>();
scalar_t *top_data_ = top_data.data_ptr<scalar_t>();
MaskedIm2colForward<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, bottom_data_, height, width, kernel_h, kernel_w,
pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
void MaskedCol2imForwardCUDAKernelLauncher(
const Tensor bottom_data, const Tensor mask_h_idx, const Tensor mask_w_idx,
Tensor top_data, const int height, const int width, const int channels) {
int mask_cnt = mask_h_idx.size(0);
int output_size = mask_cnt * channels;
at::cuda::CUDAGuard device_guard(bottom_data.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bottom_data.scalar_type(), "MaskedCol2imLaucherForward", ([&] {
const scalar_t *bottom_data_ = bottom_data.data_ptr<scalar_t>();
const int64_t *mask_h_idx_ = mask_h_idx.data_ptr<int64_t>();
const int64_t *mask_w_idx_ = mask_w_idx.data_ptr<int64_t>();
scalar_t *top_data_ = top_data.data_ptr<scalar_t>();
MaskedCol2imForward<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, bottom_data_, height, width, channels, mask_h_idx_,
mask_w_idx_, mask_cnt, top_data_);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
|
a2a2851ab150edf87c9246bc7bfdb53a460d0ac1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define MAT_TYPE double
#define MAT_SIZE 1024
#define N MAT_SIZE
#define N2 MAT_SIZE*MAT_SIZE
#define BLOCK 256
#define THREAD 512
void stopwatch(int);
__global__ void cuda_mul(MAT_TYPE* A,MAT_TYPE* B,MAT_TYPE* C,int w)
{
int tid,tx,ty;
tx = blockDim.x * blockIdx.x + threadIdx.x;
ty = blockDim.y * blockIdx.y + threadIdx.y;
tid = w*ty + tx;
MAT_TYPE v = 0;
MAT_TYPE a = 0;
MAT_TYPE b = 0;
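	// Accumulate the dot product of row ty of A and column tx of B.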
for(int i=0;i< w;i++)
{
a = A[ty * w + i];
b = B[i * w + tx];
		v += a * b;
}
C[tid]= v;
}
|
a2a2851ab150edf87c9246bc7bfdb53a460d0ac1.cu
|
#include "includes.h"
#define MAT_TYPE double
#define MAT_SIZE 1024
#define N MAT_SIZE
#define N2 MAT_SIZE*MAT_SIZE
#define BLOCK 256
#define THREAD 512
void stopwatch(int);
__global__ void cuda_mul(MAT_TYPE* A,MAT_TYPE* B,MAT_TYPE* C,int w)
{
int tid,tx,ty;
tx = blockDim.x * blockIdx.x + threadIdx.x;
ty = blockDim.y * blockIdx.y + threadIdx.y;
tid = w*ty + tx;
MAT_TYPE v = 0;
MAT_TYPE a = 0;
MAT_TYPE b = 0;
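	// Accumulate the dot product of row ty of A and column tx of B.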
for(int i=0;i< w;i++)
{
a = A[ty * w + i];
b = B[i * w + tx];
		v += a * b;
}
C[tid]= v;
}
|
b1de12c9c05237414f794babb696fc0f7d092960.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void myfirstkernel(void) {
// Code start here
}
|
b1de12c9c05237414f794babb696fc0f7d092960.cu
|
#include "includes.h"
__global__ void myfirstkernel(void) {
// Code start here
}
|
93459e606f380700640985b1e360f51d73edde8c.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
**********************************************************************
** Copyright (C) 1990, RSA Data Security, Inc. All rights reserved. **
** **
** License to copy and use this software is granted provided that **
** it is identified as the "RSA Data Security, Inc. MD5 Message **
** Digest Algorithm" in all material mentioning or referencing this **
** software or this function. **
** **
** License is also granted to make and use derivative works **
** provided that such works are identified as "derived from the RSA **
** Data Security, Inc. MD5 Message Digest Algorithm" in all **
** material mentioning or referencing the derived work. **
** **
** RSA Data Security, Inc. makes no representations concerning **
** either the merchantability of this software or the suitability **
** of this software for any particular purpose. It is provided "as **
** is" without express or implied warranty of any kind. **
** **
** These notices must be retained in any copies of any part of this **
** documentation and/or software. **
**********************************************************************
*/
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/device_functions.h>
#define MD5_HASH_SIZE 16 //16 bytes = 128 bits
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
// __device__ inline void md5Hash(unsigned char* data, uint32_t length, uint32_t *a1, uint32_t *b1, uint32_t *c1, uint32_t *d1){
void md5Hash(unsigned char* data, uint32_t length, char* result){
const uint32_t a0 = 0x67452301;
const uint32_t b0 = 0xEFCDAB89;
const uint32_t c0 = 0x98BADCFE;
const uint32_t d0 = 0x10325476;
uint32_t a = 0;
uint32_t b = 0;
uint32_t c = 0;
uint32_t d = 0;
uint32_t vals[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0};
int i = 0;
for(i=0; i < length; i++){
vals[i / 4] |= data[i] << ((i % 4) * 8);
}
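    // Append the mandatory 0x80 padding byte directly after the message. With this 14-word
    // buffer the single-block routine only handles inputs up to 55 bytes; longer input would
    // overflow vals[].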
vals[i / 4] |= 0x80 << ((i % 4) * 8);
uint32_t bitlen = length * 8;
#define in0 (vals[0])//x
#define in1 (vals[1])//y
#define in2 (vals[2])//z
#define in3 (vals[3])
#define in4 (vals[4])
#define in5 (vals[5])
#define in6 (vals[6])
#define in7 (vals[7])
#define in8 (vals[8])
#define in9 (vals[9])
#define in10 (vals[10])
#define in11 (vals[11])
#define in12 (vals[12])
#define in13 (vals[13])
#define in14 (bitlen) //w = bit length
#define in15 (0)
//Initialize hash value for this chunk:
a = a0;
b = b0;
c = c0;
d = d0;
/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
FF ( a, b, c, d, in0, S11, 3614090360); /* 1 */
FF ( d, a, b, c, in1, S12, 3905402710); /* 2 */
FF ( c, d, a, b, in2, S13, 606105819); /* 3 */
FF ( b, c, d, a, in3, S14, 3250441966); /* 4 */
FF ( a, b, c, d, in4, S11, 4118548399); /* 5 */
FF ( d, a, b, c, in5, S12, 1200080426); /* 6 */
FF ( c, d, a, b, in6, S13, 2821735955); /* 7 */
FF ( b, c, d, a, in7, S14, 4249261313); /* 8 */
FF ( a, b, c, d, in8, S11, 1770035416); /* 9 */
FF ( d, a, b, c, in9, S12, 2336552879); /* 10 */
FF ( c, d, a, b, in10, S13, 4294925233); /* 11 */
FF ( b, c, d, a, in11, S14, 2304563134); /* 12 */
FF ( a, b, c, d, in12, S11, 1804603682); /* 13 */
FF ( d, a, b, c, in13, S12, 4254626195); /* 14 */
FF ( c, d, a, b, in14, S13, 2792965006); /* 15 */
FF ( b, c, d, a, in15, S14, 1236535329); /* 16 */
/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
GG ( a, b, c, d, in1, S21, 4129170786); /* 17 */
GG ( d, a, b, c, in6, S22, 3225465664); /* 18 */
GG ( c, d, a, b, in11, S23, 643717713); /* 19 */
GG ( b, c, d, a, in0, S24, 3921069994); /* 20 */
GG ( a, b, c, d, in5, S21, 3593408605); /* 21 */
GG ( d, a, b, c, in10, S22, 38016083); /* 22 */
GG ( c, d, a, b, in15, S23, 3634488961); /* 23 */
GG ( b, c, d, a, in4, S24, 3889429448); /* 24 */
GG ( a, b, c, d, in9, S21, 568446438); /* 25 */
GG ( d, a, b, c, in14, S22, 3275163606); /* 26 */
GG ( c, d, a, b, in3, S23, 4107603335); /* 27 */
GG ( b, c, d, a, in8, S24, 1163531501); /* 28 */
GG ( a, b, c, d, in13, S21, 2850285829); /* 29 */
GG ( d, a, b, c, in2, S22, 4243563512); /* 30 */
GG ( c, d, a, b, in7, S23, 1735328473); /* 31 */
GG ( b, c, d, a, in12, S24, 2368359562); /* 32 */
/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
HH ( a, b, c, d, in5, S31, 4294588738); /* 33 */
HH ( d, a, b, c, in8, S32, 2272392833); /* 34 */
HH ( c, d, a, b, in11, S33, 1839030562); /* 35 */
HH ( b, c, d, a, in14, S34, 4259657740); /* 36 */
HH ( a, b, c, d, in1, S31, 2763975236); /* 37 */
HH ( d, a, b, c, in4, S32, 1272893353); /* 38 */
HH ( c, d, a, b, in7, S33, 4139469664); /* 39 */
HH ( b, c, d, a, in10, S34, 3200236656); /* 40 */
HH ( a, b, c, d, in13, S31, 681279174); /* 41 */
HH ( d, a, b, c, in0, S32, 3936430074); /* 42 */
HH ( c, d, a, b, in3, S33, 3572445317); /* 43 */
HH ( b, c, d, a, in6, S34, 76029189); /* 44 */
HH ( a, b, c, d, in9, S31, 3654602809); /* 45 */
HH ( d, a, b, c, in12, S32, 3873151461); /* 46 */
HH ( c, d, a, b, in15, S33, 530742520); /* 47 */
HH ( b, c, d, a, in2, S34, 3299628645); /* 48 */
/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
II ( a, b, c, d, in0, S41, 4096336452); /* 49 */
II ( d, a, b, c, in7, S42, 1126891415); /* 50 */
II ( c, d, a, b, in14, S43, 2878612391); /* 51 */
II ( b, c, d, a, in5, S44, 4237533241); /* 52 */
II ( a, b, c, d, in12, S41, 1700485571); /* 53 */
II ( d, a, b, c, in3, S42, 2399980690); /* 54 */
II ( c, d, a, b, in10, S43, 4293915773); /* 55 */
II ( b, c, d, a, in1, S44, 2240044497); /* 56 */
II ( a, b, c, d, in8, S41, 1873313359); /* 57 */
II ( d, a, b, c, in15, S42, 4264355552); /* 58 */
II ( c, d, a, b, in6, S43, 2734768916); /* 59 */
II ( b, c, d, a, in13, S44, 1309151649); /* 60 */
II ( a, b, c, d, in4, S41, 4149444226); /* 61 */
II ( d, a, b, c, in11, S42, 3174756917); /* 62 */
II ( c, d, a, b, in2, S43, 718787259); /* 63 */
II ( b, c, d, a, in9, S44, 3951481745); /* 64 */
a += a0;
b += b0;
c += c0;
d += d0;
uint32_t* uint_ptr = (uint32_t*)result;
uint_ptr[0] = a;
uint_ptr[1] = b;
uint_ptr[2] = c;
uint_ptr[3] = d;
}
void check_endian() {
//https://www.geeksforgeeks.org/little-and-big-endian-mystery/
unsigned int i = 1;
char *c = (char*)&i;
printf("System is: ");
if (*c)
printf("Little endian\n");
else
printf("Big endian\n");
}
void print_hash_little_endian(char* result) {
for(int i=0;i<MD5_HASH_SIZE;i++) {
printf("%02x", result[i] & 0xff);
// if ((i+1)%4==0) printf(" ");
}
printf("\n");
fflush(stdout);
}
void print_hash(char* result) {
for(int i=0;i<MD5_HASH_SIZE/4;i++) {
printf("%08x", ((uint32_t*)result)[i] & 0xffffffff);
}
printf("\n");
fflush(stdout);
}
char* get_hex_hash(char* result) {
    char* hex_hash = (char*)malloc(MD5_HASH_SIZE*2 + 1); // +1 for the terminating null written by sprintf
for(int i=0;i<MD5_HASH_SIZE;i++) {
sprintf(&hex_hash[i*2],"%02x", result[i] & 0xff);
// if ((i+1)%4==0) printf(" ");
}
printf("hex: %s\n", hex_hash);
return hex_hash;
}
void convert_to_big_endian(char* result) {
// char temp[2*MD5_HASH_SIZE];
// for(int i=0;i<MD5_HASH_SIZE;i++) {
// // printf("%02X", result[i] & 0xff);
// sprintf((&temp[i*2]), "%02X", result[i] & 0xff);
// // if ((i+1)%4==0) printf(" ");
// }
// printf("%s\n", temp);
// fflush(stdout);
for (int i = 0;i<MD5_HASH_SIZE/4;i++) {
int base = i*4;
char temp = result[base+3];
result[base + 3] = result[base + 0];
result[base+0] = temp;
temp = result[base+2];
result[base+2] = result[base+1];
result[base+1] = temp;
}
}
void hash_input(unsigned char* string, uint length_statement) {
char* result = (char*)malloc(MD5_HASH_SIZE * 8);
// uint32_t str_length = strlen((char*)string);
uint32_t str_length = length_statement;
printf("Size of input: %u\n", str_length);
md5Hash(string, str_length, result);
print_hash_little_endian(result);
// convert_to_big_endian(result);
// print_hash(result);
free(result);
}
bool check_hash() {
char* test_string = "test";
char* expected_output = "098f6bcd4621d373cade4e832627b4f6";
char* result = (char*)malloc(MD5_HASH_SIZE);
uint32_t str_length = strlen((char*)test_string);
md5Hash((unsigned char*)test_string, str_length, result);
char* hash_hex = get_hex_hash(result);
for (int i = 0; i<strlen(expected_output);i++) {
// printf("Hash %c Exp %c\n", hash_hex[i], expected_output[i]);
assert(hash_hex[i] == expected_output[i]);
}
    assert(strcmp(hash_hex, expected_output) == 0);
    bool matches = strcmp(hash_hex, expected_output) == 0;
    free(hash_hex);
    free(result);
    return matches;
}
//(To whom it may concern, null) I, Phillip Stephens, (bequeath, grant) (all, the entirety) of my (personal, null) (belongings, possessions) to my (two, null) children(, Alice and Bob, null). I (give, leave) my (house, home) to Alice and my (red, null) (sports, null) (Ferrari, car) to Bob. (Additionally, In addition), (all, the entirety) of my monetary (assets, accounts) (Id, I would) (like, prefer) to (leave,give) to the (community, local) food (bank, pantry). I (hope, believe) that this (small, null) act of (generosity, kindness) will (affect, help) (many, null) (others, other people) as I too have been (affected, helped) by (so, null) many (more, others).
// (Signed, Sincerely), Phillip Stephens
void generate_statements(char* statement_ptr, int num_statements, int length_statement) {
double num_oct_digits = ceil(log(num_statements) / log(2) / 4);
uint i,j, bin_digit;
char letter;
for (i = 0; i<num_statements; i++) {
sprintf(statement_ptr+(i*length_statement), "%d",i);
}
for (i = 0; i<num_statements; i++) {
for (j=0;j<length_statement;j++) {
printf("%c", statement_ptr[i*length_statement + j]);
}
printf("\n");
}
// printf("Phrase %s", statement_ptr);
}
void check_statements(char* statement_ptr, int num_statements, int length_statement) {
char* target = "ad9225d796d5da7da216af1ea9982079";
char* result = (char*)malloc(MD5_HASH_SIZE * 8);
// uint32_t str_length = strlen((char*)string);
uint32_t str_length = length_statement;
printf("Size of input: %u\n", str_length);
    md5Hash((unsigned char*)statement_ptr, str_length, result);
print_hash_little_endian(result);
// convert_to_big_endian(result);
// print_hash(result);
free(result);
}
int main(int argc, char *argv[]) {
// char * string = "test";
// printf("String = %s\n", string);
// check_endian();
// fflush(stdout);
// check_hash();
// hash_input((unsigned char*)string);
uint num_statements = int(pow(2,4));
uint length_statement = 10;
char * statement_ptr= (char*) malloc(num_statements * length_statement);
generate_statements(statement_ptr, num_statements, length_statement);
hash_input((unsigned char *)statement_ptr, length_statement);
for (uint i = 0;i<num_statements;i++) {
hash_input((unsigned char *)(statement_ptr+i*length_statement), length_statement);
}
free(statement_ptr);
}
|
93459e606f380700640985b1e360f51d73edde8c.cu
|
/**
**********************************************************************
** Copyright (C) 1990, RSA Data Security, Inc. All rights reserved. **
** **
** License to copy and use this software is granted provided that **
** it is identified as the "RSA Data Security, Inc. MD5 Message **
** Digest Algorithm" in all material mentioning or referencing this **
** software or this function. **
** **
** License is also granted to make and use derivative works **
** provided that such works are identified as "derived from the RSA **
** Data Security, Inc. MD5 Message Digest Algorithm" in all **
** material mentioning or referencing the derived work. **
** **
** RSA Data Security, Inc. makes no representations concerning **
** either the merchantability of this software or the suitability **
** of this software for any particular purpose. It is provided "as **
** is" without express or implied warranty of any kind. **
** **
** These notices must be retained in any copies of any part of this **
** documentation and/or software. **
**********************************************************************
*/
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h> /* strlen, strcmp, memset, memcpy */
#include <math.h> /* pow */
#include <assert.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <curand_kernel.h>
#include <device_functions.h>
#define MD5_HASH_SIZE 16 //16 bytes = 128 bits
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
// __device__ inline void md5Hash(unsigned char* data, uint32_t length, uint32_t *a1, uint32_t *b1, uint32_t *c1, uint32_t *d1){
void md5Hash(unsigned char* data, uint32_t length, char* result){
const uint32_t a0 = 0x67452301;
const uint32_t b0 = 0xEFCDAB89;
const uint32_t c0 = 0x98BADCFE;
const uint32_t d0 = 0x10325476;
uint32_t a = 0;
uint32_t b = 0;
uint32_t c = 0;
uint32_t d = 0;
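// NOTE: single-block implementation -- the 14-word buffer plus the 0x80 pad
// byte limits the input to at most 55 bytes; longer messages would overflow vals[].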
uint32_t vals[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0};
int i = 0;
for(i=0; i < length; i++){
vals[i / 4] |= data[i] << ((i % 4) * 8);
}
vals[i / 4] |= 0x80 << ((i % 4) * 8);
uint32_t bitlen = length * 8;
#define in0 (vals[0])//x
#define in1 (vals[1])//y
#define in2 (vals[2])//z
#define in3 (vals[3])
#define in4 (vals[4])
#define in5 (vals[5])
#define in6 (vals[6])
#define in7 (vals[7])
#define in8 (vals[8])
#define in9 (vals[9])
#define in10 (vals[10])
#define in11 (vals[11])
#define in12 (vals[12])
#define in13 (vals[13])
#define in14 (bitlen) //w = bit length
#define in15 (0)
//Initialize hash value for this chunk:
a = a0;
b = b0;
c = c0;
d = d0;
/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
FF ( a, b, c, d, in0, S11, 3614090360); /* 1 */
FF ( d, a, b, c, in1, S12, 3905402710); /* 2 */
FF ( c, d, a, b, in2, S13, 606105819); /* 3 */
FF ( b, c, d, a, in3, S14, 3250441966); /* 4 */
FF ( a, b, c, d, in4, S11, 4118548399); /* 5 */
FF ( d, a, b, c, in5, S12, 1200080426); /* 6 */
FF ( c, d, a, b, in6, S13, 2821735955); /* 7 */
FF ( b, c, d, a, in7, S14, 4249261313); /* 8 */
FF ( a, b, c, d, in8, S11, 1770035416); /* 9 */
FF ( d, a, b, c, in9, S12, 2336552879); /* 10 */
FF ( c, d, a, b, in10, S13, 4294925233); /* 11 */
FF ( b, c, d, a, in11, S14, 2304563134); /* 12 */
FF ( a, b, c, d, in12, S11, 1804603682); /* 13 */
FF ( d, a, b, c, in13, S12, 4254626195); /* 14 */
FF ( c, d, a, b, in14, S13, 2792965006); /* 15 */
FF ( b, c, d, a, in15, S14, 1236535329); /* 16 */
/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
GG ( a, b, c, d, in1, S21, 4129170786); /* 17 */
GG ( d, a, b, c, in6, S22, 3225465664); /* 18 */
GG ( c, d, a, b, in11, S23, 643717713); /* 19 */
GG ( b, c, d, a, in0, S24, 3921069994); /* 20 */
GG ( a, b, c, d, in5, S21, 3593408605); /* 21 */
GG ( d, a, b, c, in10, S22, 38016083); /* 22 */
GG ( c, d, a, b, in15, S23, 3634488961); /* 23 */
GG ( b, c, d, a, in4, S24, 3889429448); /* 24 */
GG ( a, b, c, d, in9, S21, 568446438); /* 25 */
GG ( d, a, b, c, in14, S22, 3275163606); /* 26 */
GG ( c, d, a, b, in3, S23, 4107603335); /* 27 */
GG ( b, c, d, a, in8, S24, 1163531501); /* 28 */
GG ( a, b, c, d, in13, S21, 2850285829); /* 29 */
GG ( d, a, b, c, in2, S22, 4243563512); /* 30 */
GG ( c, d, a, b, in7, S23, 1735328473); /* 31 */
GG ( b, c, d, a, in12, S24, 2368359562); /* 32 */
/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
HH ( a, b, c, d, in5, S31, 4294588738); /* 33 */
HH ( d, a, b, c, in8, S32, 2272392833); /* 34 */
HH ( c, d, a, b, in11, S33, 1839030562); /* 35 */
HH ( b, c, d, a, in14, S34, 4259657740); /* 36 */
HH ( a, b, c, d, in1, S31, 2763975236); /* 37 */
HH ( d, a, b, c, in4, S32, 1272893353); /* 38 */
HH ( c, d, a, b, in7, S33, 4139469664); /* 39 */
HH ( b, c, d, a, in10, S34, 3200236656); /* 40 */
HH ( a, b, c, d, in13, S31, 681279174); /* 41 */
HH ( d, a, b, c, in0, S32, 3936430074); /* 42 */
HH ( c, d, a, b, in3, S33, 3572445317); /* 43 */
HH ( b, c, d, a, in6, S34, 76029189); /* 44 */
HH ( a, b, c, d, in9, S31, 3654602809); /* 45 */
HH ( d, a, b, c, in12, S32, 3873151461); /* 46 */
HH ( c, d, a, b, in15, S33, 530742520); /* 47 */
HH ( b, c, d, a, in2, S34, 3299628645); /* 48 */
/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
II ( a, b, c, d, in0, S41, 4096336452); /* 49 */
II ( d, a, b, c, in7, S42, 1126891415); /* 50 */
II ( c, d, a, b, in14, S43, 2878612391); /* 51 */
II ( b, c, d, a, in5, S44, 4237533241); /* 52 */
II ( a, b, c, d, in12, S41, 1700485571); /* 53 */
II ( d, a, b, c, in3, S42, 2399980690); /* 54 */
II ( c, d, a, b, in10, S43, 4293915773); /* 55 */
II ( b, c, d, a, in1, S44, 2240044497); /* 56 */
II ( a, b, c, d, in8, S41, 1873313359); /* 57 */
II ( d, a, b, c, in15, S42, 4264355552); /* 58 */
II ( c, d, a, b, in6, S43, 2734768916); /* 59 */
II ( b, c, d, a, in13, S44, 1309151649); /* 60 */
II ( a, b, c, d, in4, S41, 4149444226); /* 61 */
II ( d, a, b, c, in11, S42, 3174756917); /* 62 */
II ( c, d, a, b, in2, S43, 718787259); /* 63 */
II ( b, c, d, a, in9, S44, 3951481745); /* 64 */
a += a0;
b += b0;
c += c0;
d += d0;
uint32_t* uint_ptr = (uint32_t*)result;
uint_ptr[0] = a;
uint_ptr[1] = b;
uint_ptr[2] = c;
uint_ptr[3] = d;
}
void check_endian() {
//https://www.geeksforgeeks.org/little-and-big-endian-mystery/
unsigned int i = 1;
char *c = (char*)&i;
printf("System is: ");
if (*c)
printf("Little endian\n");
else
printf("Big endian\n");
}
void print_hash_little_endian(char* result) {
for(int i=0;i<MD5_HASH_SIZE;i++) {
printf("%02x", result[i] & 0xff);
// if ((i+1)%4==0) printf(" ");
}
printf("\n");
fflush(stdout);
}
void print_hash(char* result) {
for(int i=0;i<MD5_HASH_SIZE/4;i++) {
printf("%08x", ((uint32_t*)result)[i] & 0xffffffff);
}
printf("\n");
fflush(stdout);
}
char* get_hex_hash(char* result) {
// one extra byte for the terminating '\0' written by the final sprintf
char* hex_hash = (char*)malloc(MD5_HASH_SIZE*2 + 1);
for(int i=0;i<MD5_HASH_SIZE;i++) {
sprintf(&hex_hash[i*2],"%02x", result[i] & 0xff);
// if ((i+1)%4==0) printf(" ");
}
printf("hex: %s\n", hex_hash);
return hex_hash;
}
void convert_to_big_endian(char* result) {
// char temp[2*MD5_HASH_SIZE];
// for(int i=0;i<MD5_HASH_SIZE;i++) {
// // printf("%02X", result[i] & 0xff);
// sprintf((&temp[i*2]), "%02X", result[i] & 0xff);
// // if ((i+1)%4==0) printf(" ");
// }
// printf("%s\n", temp);
// fflush(stdout);
for (int i = 0;i<MD5_HASH_SIZE/4;i++) {
int base = i*4;
char temp = result[base+3];
result[base + 3] = result[base + 0];
result[base+0] = temp;
temp = result[base+2];
result[base+2] = result[base+1];
result[base+1] = temp;
}
}
void hash_input(unsigned char* string, uint length_statement) {
char* result = (char*)malloc(MD5_HASH_SIZE * 8);
// uint32_t str_length = strlen((char*)string);
uint32_t str_length = length_statement;
printf("Size of input: %u\n", str_length);
md5Hash(string, str_length, result);
print_hash_little_endian(result);
// convert_to_big_endian(result);
// print_hash(result);
free(result);
}
bool check_hash() {
char* test_string = "test";
char* expected_output = "098f6bcd4621d373cade4e832627b4f6";
char* result = (char*)malloc(MD5_HASH_SIZE);
uint32_t str_length = strlen((char*)test_string);
md5Hash((unsigned char*)test_string, str_length, result);
char* hash_hex = get_hex_hash(result);
for (size_t i = 0; i < strlen(expected_output); i++) {
// printf("Hash %c Exp %c\n", hash_hex[i], expected_output[i]);
assert(hash_hex[i] == expected_output[i]);
}
// compare the hex digest (not the raw digest bytes), and do it before freeing
bool match = strcmp(hash_hex, expected_output) == 0;
assert(match);
free(hash_hex);
free(result);
return match;
}
//(To whom it may concern, null) I, Phillip Stephens, (bequeath, grant) (all, the entirety) of my (personal, null) (belongings, possessions) to my (two, null) children(, Alice and Bob, null). I (give, leave) my (house, home) to Alice and my (red, null) (sports, null) (Ferrari, car) to Bob. (Additionally, In addition), (all, the entirety) of my monetary (assets, accounts) (I’d, I would) (like, prefer) to (leave,give) to the (community, local) food (bank, pantry). I (hope, believe) that this (small, null) act of (generosity, kindness) will (affect, help) (many, null) (others, other people) as I too have been (affected, helped) by (so, null) many (more, others).
// (Signed, Sincerely), Phillip Stephens
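/*
 * Illustrative sketch (the names and fragments below are assumptions, not part
 * of the original brute-force code): each parenthesized group in the statement
 * above is a binary choice ("null" meaning the word is omitted), so a candidate
 * statement can be derived from the bits of a single index and then hashed
 * against the target digest. generate_statements() below currently just writes
 * a decimal index into each fixed-length slot; expanding the template would
 * look roughly like this.
 */
static void build_candidate(uint32_t choice_bits, char* out, size_t out_len) {
    /* fragments[i][0] and fragments[i][1] are the two alternatives for choice
       point i; an empty string plays the role of "null". Only a few of the
       template's choice points are shown here. */
    static const char* fragments[][2] = {
        {"To whom it may concern, ", ""},
        {"I, Phillip Stephens, bequeath ", "I, Phillip Stephens, grant "},
        {"all", "the entirety"},
        {" of my belongings to my two children.", " of my possessions to my children."}
    };
    const int n_slots = sizeof(fragments)/sizeof(fragments[0]);
    out[0] = '\0';
    for (int i = 0; i < n_slots; i++) {
        /* bit i of choice_bits selects which alternative fills slot i */
        const char* piece = fragments[i][(choice_bits >> i) & 1u];
        strncat(out, piece, out_len - strlen(out) - 1);
    }
}
/* e.g. build_candidate(0x5, buf, sizeof(buf)) picks alternative 1 for choice
   points 0 and 2 and alternative 0 elsewhere; hashing buf with md5Hash() and
   comparing the get_hex_hash() output to the target covers one of the 2^N
   candidate statements. */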
void generate_statements(char* statement_ptr, int num_statements, int length_statement) {
int i, j;
// fill every slot with a known pad byte first, so each fixed-length
// statement (and therefore its hash) is fully determined
memset(statement_ptr, ' ', (size_t)num_statements * length_statement);
for (i = 0; i < num_statements; i++) {
// write the candidate index into the slot without the trailing '\0'
char index_str[16];
int n = snprintf(index_str, sizeof(index_str), "%d", i);
if (n > length_statement) n = length_statement;
memcpy(statement_ptr + (i * length_statement), index_str, n);
}
for (i = 0; i < num_statements; i++) {
for (j = 0; j < length_statement; j++) {
printf("%c", statement_ptr[i * length_statement + j]);
}
printf("\n");
}
// printf("Phrase %s", statement_ptr);
}
void check_statements(char* statement_ptr, int num_statements, int length_statement) {
const char* target = "ad9225d796d5da7da216af1ea9982079";
char* result = (char*)malloc(MD5_HASH_SIZE);
// hash each fixed-length candidate statement and compare its hex digest
// against the target digest
for (int i = 0; i < num_statements; i++) {
md5Hash((unsigned char*)(statement_ptr + i * length_statement), (uint32_t)length_statement, result);
char* hash_hex = get_hex_hash(result);
if (strcmp(hash_hex, target) == 0) {
printf("Statement %d matches the target hash\n", i);
}
free(hash_hex);
}
free(result);
}
int main(int argc, char *argv[]) {
// char * string = "test";
// printf("String = %s\n", string);
// check_endian();
// fflush(stdout);
// check_hash();
// hash_input((unsigned char*)string);
uint num_statements = int(pow(2,4));
uint length_statement = 10;
char * statement_ptr= (char*) malloc(num_statements * length_statement);
generate_statements(statement_ptr, num_statements, length_statement);
hash_input((unsigned char *)statement_ptr, length_statement);
for (uint i = 0;i<num_statements;i++) {
hash_input((unsigned char *)(statement_ptr+i*length_statement), length_statement);
}
free(statement_ptr);
}
|
b26298bd936a6afa1efed642e4845d4be88ae0fa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "countRest.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *bin = NULL;
hipMalloc(&bin, XSIZE*YSIZE*sizeof(int));
int *bin_counters = NULL;
hipMalloc(&bin_counters, XSIZE*YSIZE*sizeof(int));
const int num_bins = 1;
const int maxBin = 1;
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(countRest, dim3(gridBlock), dim3(threadBlock), 0, 0, bin, bin_counters, num_bins, maxBin, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(countRest, dim3(gridBlock), dim3(threadBlock), 0, 0, bin, bin_counters, num_bins, maxBin, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(countRest, dim3(gridBlock), dim3(threadBlock), 0, 0, bin, bin_counters, num_bins, maxBin, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
b26298bd936a6afa1efed642e4845d4be88ae0fa.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "countRest.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *bin = NULL;
cudaMalloc(&bin, XSIZE*YSIZE*sizeof(int));
int *bin_counters = NULL;
cudaMalloc(&bin_counters, XSIZE*YSIZE*sizeof(int));
const int num_bins = 1;
const int maxBin = 1;
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
countRest<<<gridBlock,threadBlock>>>(bin,bin_counters,num_bins,maxBin,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
countRest<<<gridBlock,threadBlock>>>(bin,bin_counters,num_bins,maxBin,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
countRest<<<gridBlock,threadBlock>>>(bin,bin_counters,num_bins,maxBin,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
0b930feb27b077813c3ae259a08ed6bfca369dc5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* main.cpp
*
* Created on: Mar 24, 2011
* Author: cheesinglee
*/
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <algorithm>
#include <sstream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <string>
#include <cstdarg>
#include "slamtypes.h"
//#include "slamparams.h"
#include <cutil.h>
//#include <complex.h>
//#include <fftw3.h>
#include <assert.h>
#include <float.h>
#include "cuPrintf.hip"
#include "device_math.cuh"
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/remove.h>
#include <thrust/copy.h>
#include <thrust/partition.h>
#include <thrust/scan.h>
#include <thrust/iterator/zip_iterator.h>
//#include "ConstantVelocity2DKinematicModel.cu"
// include gcc-compiled boost rng
#include "rng.h"
#ifdef __CDT_PARSER__
#define __device__
#define __global__
#define __constant__
#define __shared__
#define __host__
#endif
#define DEBUG
#ifdef DEBUG
#define DEBUG_MSG(x) cout << "[" << __func__ << "(" << __LINE__ << ")]: " << x << endl
#define DEBUG_VAL(x) cout << "[" << __func__ << "(" << __LINE__ << ")]: " << #x << " = " << x << endl
#else
#define DEBUG_MSG(x)
#define DEBUG_VAL(x)
#endif
//--- Make kernel helper functions externally visible
void
initCphdConstants() ;
void
predictMap(SynthSLAM& p) ;
void
phdPredict(SynthSLAM& particles, ... ) ;
template<class GaussianType>
void
phdPredictVp( SynthSLAM& particles ) ;
SynthSLAM
phdUpdate(SynthSLAM& particles, measurementSet measurements) ;
template <typename T>
T resampleParticles(T oldParticles, int n_particles=-1 ) ;
void
recoverSlamState(SynthSLAM& particles, ConstantVelocityState& expectedPose,
vector<REAL>& cn_estimate ) ;
void
recoverSlamState(DisparitySLAM& particles, ConstantVelocityState& expectedPose ) ;
void
setDeviceConfig( const SlamConfig& config ) ;
//--- End external declarations
//template<class GaussianType>
//__host__ __device__ REAL
//wrapAngle(REAL a) ;
//--- End external function declaration
// SLAM configuration, externally declared
extern SlamConfig config ;
// device memory limit, externally declared
extern size_t deviceMemLimit ;
// dynamic shared memory
extern __shared__ REAL shmem[] ;
using namespace std ;
using namespace thrust ;
// Constant memory variables
__device__ __constant__ RangeBearingMeasurement Z[256] ;
__device__ __constant__ SlamConfig dev_config ;
// other global device variables
REAL* dev_C ;
REAL* dev_factorial ;
REAL* log_factorials ;
//__device__ REAL* dev_qspower ;
//__device__ REAL* dev_pspower ;
REAL* dev_cn_clutter ;
//ConstantVelocityModelProps modelProps = {STDX, STDY,STDTHETA} ;
//ConstantVelocity2DKinematicModel motionModel(modelProps) ;
/// helper function for outputting a Gaussian to std_out
template<class GaussianType>
__host__ void
print_feature(GaussianType f)
{
int dims = getGaussianDim(f) ;
//#if defined(__CUDA_ARCH__)
//#warning __CUDA_ARCH__ is defined
// cuPrintf("%f ",f.weight) ;
// for ( int i = 0 ; i < dims ; i++ )
// cuPrintf("%f ",f.mean[i]) ;
// for ( int i = 0 ; i < dims*dims ; i++ )
// cuPrintf("%f ",f.cov[i]) ;
// cuPrintf("\n") ;
//#else
//#warning __CUDA_ARCH__ is not defined
cout << f.weight << " " ;
for ( int i = 0 ; i < dims ; i++ )
cout << f.mean[i] << " " ;
for ( int i = 0 ; i < dims*dims ; i++)
cout << f.cov[i] << " " ;
cout << endl ;
//#endif
}
/// combine all features from all particles into a single STL vector
template<class GaussianType>
vector<GaussianType> combineFeatures(vector<vector <GaussianType> > maps, ...)
{
vector<GaussianType> concat ;
for ( unsigned int n = 0 ; n < maps.size() ; n++ )
concat.insert( concat.end(), maps[n].begin(), maps[n].end()) ;
return concat ;
}
/// return the next highest power of two
int nextPowerOfTwo(int a)
{
int n = a - 1 ;
n = n | (n >> 1) ;
n = n | (n >> 2) ;
n = n | (n >> 4) ;
n = n | (n >> 8);
n = n | (n >> 16) ;
n = n + 1 ;
return n ;
}
__device__ void
computeBirth( ConstantVelocityState pose, RangeBearingMeasurement z,
Gaussian2D& feature_birth)
{
// invert measurement
REAL theta = pose.ptheta + z.bearing ;
REAL dx = z.range*cos(theta) ;
REAL dy = z.range*sin(theta) ;
feature_birth.mean[0] = pose.px + dx ;
feature_birth.mean[1] = pose.py + dy ;
// inverse measurement jacobian
REAL J[4] ;
J[0] = dx/z.range ;
J[1] = dy/z.range ;
J[2] = -dy ;
J[3] = dx ;
// measurement noise
REAL var_range = pow(dev_config.stdRange*dev_config.birthNoiseFactor,2) ;
REAL var_bearing = pow(dev_config.stdBearing*dev_config.birthNoiseFactor,2) ;
// compute birth covariance
feature_birth.cov[0] = pow(J[0],2)*var_range +
pow(J[2],2)*var_bearing ;
feature_birth.cov[1] = J[0]*J[1]*var_range +
J[2]*J[3]*var_bearing ;
feature_birth.cov[2] =
feature_birth.cov[1] ;
feature_birth.cov[3] = pow(J[1],2)*var_range +
pow(J[3],2)*var_bearing ;
// set birth weight
if(z.label==STATIC_MEASUREMENT || !dev_config.labeledMeasurements)
feature_birth.weight = safeLog(dev_config.birthWeight) ;
else
feature_birth.weight = safeLog(0) ;
}
__device__ void
computeBirth( ConstantVelocityState pose, RangeBearingMeasurement z,
Gaussian4D& feature_birth)
{
// invert measurement
REAL theta = pose.ptheta + z.bearing ;
REAL dx = z.range*cos(theta) ;
REAL dy = z.range*sin(theta) ;
feature_birth.mean[0] = pose.px + dx ;
feature_birth.mean[1] = pose.py + dy ;
// inverse measurement jacobian
REAL J[4] ;
J[0] = dx/z.range ;
J[1] = dy/z.range ;
J[2] = -dy ;
J[3] = dx ;
// measurement noise
REAL var_range = pow(dev_config.stdRange*dev_config.birthNoiseFactor,2) ;
REAL var_bearing = pow(dev_config.stdBearing*dev_config.birthNoiseFactor,2) ;
// mean birth velocity is zero
feature_birth.mean[2] = 0 ;
feature_birth.mean[3] = 0 ;
// upper 2x2 block of covariance matrix = K*R*K'
feature_birth.cov[0] = pow(J[0],2)*var_range +
pow(J[2],2)*var_bearing ;
feature_birth.cov[1] = J[0]*J[1]*var_range +
J[2]*J[3]*var_bearing ;
feature_birth.cov[4] =
feature_birth.cov[1] ;
feature_birth.cov[5] = pow(J[1],2)*var_range +
pow(J[3],2)*var_bearing ;
// lower 2 diagonal terms set to parameter value
feature_birth.cov[10] = dev_config.covVxBirth ;
feature_birth.cov[15] = dev_config.covVyBirth ;
// everything else set to 0
feature_birth.cov[2] = 0 ;
feature_birth.cov[3] = 0 ;
feature_birth.cov[6] = 0 ;
feature_birth.cov[7] = 0 ;
feature_birth.cov[8] = 0 ;
feature_birth.cov[9] = 0 ;
feature_birth.cov[11] = 0 ;
feature_birth.cov[12] = 0 ;
feature_birth.cov[13] = 0 ;
feature_birth.cov[14] = 0 ;
// set birth weight
if (z.label == DYNAMIC_MEASUREMENT || !dev_config.labeledMeasurements)
feature_birth.weight = safeLog(dev_config.birthWeight) ;
else
feature_birth.weight = safeLog(0) ;
}
__device__ void
computePreUpdate( ConstantVelocityState pose, Gaussian2D feature_predict,
int n_features, int n_measure, REAL& feature_pd,
Gaussian2D& feature_nondetect,
Gaussian2D*& features_update)
{
// predicted measurement
REAL dx = feature_predict.mean[0] - pose.px ;
REAL dy = feature_predict.mean[1] - pose.py ;
REAL r2 = dx*dx + dy*dy ;
REAL r = sqrt(r2) ;
REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// probability of detection
feature_pd = 0 ;
if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
feature_pd = dev_config.pd ;
// write non-detection term
copy_gaussians(feature_predict,feature_nondetect) ;
feature_nondetect.weight = feature_predict.weight*(1-feature_pd) ;
// measurement jacobian wrt feature
REAL J[4] ;
J[0] = dx/r ;
J[2] = dy/r ;
J[1] = -dy/r2 ;
J[3] = dx/r2 ;
REAL* P = feature_predict.cov ;
// BEGIN Maple-Generated expressions
// innovation covariance
REAL sigma[4] ;
sigma[0] = (P[0] * J[0] + J[2] * P[1]) * J[0] + (J[0] * P[2] + P[3] * J[2]) * J[2] + pow(dev_config.stdRange,2) ;
sigma[1] = (P[0] * J[1] + J[3] * P[1]) * J[0] + (J[1] * P[2] + P[3] * J[3]) * J[2];
sigma[2] = (P[0] * J[0] + J[2] * P[1]) * J[1] + (J[0] * P[2] + P[3] * J[2]) * J[3];
sigma[3] = (P[0] * J[1] + J[3] * P[1]) * J[1] + (J[1] * P[2] + P[3] * J[3]) * J[3] + pow(dev_config.stdBearing,2) ;
// enforce symmetry
sigma[1] = (sigma[1]+sigma[2])/2 ;
sigma[2] = sigma[1] ;
REAL det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
// inverse of sigma
REAL S[4] ;
S[0] = sigma[3]/(det_sigma) ;
S[1] = -sigma[1]/(det_sigma) ;
S[2] = -sigma[2]/(det_sigma) ;
S[3] = sigma[0]/(det_sigma) ;
// Kalman gain
REAL K[4] ;
K[0] = S[0]*(P[0]*J[0] + P[2]*J[2]) + S[1]*(P[0]*J[1] + P[2]*J[3]) ;
K[1] = S[0]*(P[1]*J[0] + P[3]*J[2]) + S[1]*(P[1]*J[1] + P[3]*J[3]) ;
K[2] = S[2]*(P[0]*J[0] + P[2]*J[2]) + S[3]*(P[0]*J[1] + P[2]*J[3]) ;
K[3] = S[2]*(P[1]*J[0] + P[3]*J[2]) + S[3]*(P[1]*J[1] + P[3]*J[3]) ;
REAL cov_update[4] ;
cov_update[0] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + pow(K[0], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[2], 2) * dev_config.stdBearing*dev_config.stdBearing;
cov_update[2] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
cov_update[1] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
cov_update[3] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + pow(K[1], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[3], 2) * dev_config.stdBearing*dev_config.stdBearing;
REAL innov[2] ;
REAL dist = 0 ;
for ( int m = 0 ; m < n_measure ; m++ )
{
int idx = m*n_features ;
innov[0] = Z[m].range - r ;
innov[1] = wrapAngle(Z[m].bearing - bearing) ;
features_update[idx].mean[0] = feature_predict.mean[0] + K[0]*innov[0] + K[2]*innov[1] ;
features_update[idx].mean[1] = feature_predict.mean[1] + K[1]*innov[0] + K[3]*innov[1] ;
for ( int i = 0 ; i < 4 ; i++ )
features_update[idx].cov[i] = cov_update[i] ;
// compute single object likelihood
dist = innov[0]*innov[0]*S[0] +
innov[0]*innov[1]*(S[1] + S[2]) +
innov[1]*innov[1]*S[3] ;
if(Z[m].label==STATIC_MEASUREMENT || !dev_config.labeledMeasurements)
{
// partially update weight (log-transformed)
features_update[idx].weight = safeLog(feature_pd)
+ safeLog(feature_predict.weight)
- 0.5*dist
- safeLog(2*M_PI)
- 0.5*safeLog(det_sigma) ;
}
else
{
features_update[idx].weight = safeLog(0) ;
}
}
}
__device__ void
computePreUpdate( ConstantVelocityState pose, Gaussian4D feature_predict,
int n_features, int n_measure, REAL& feature_pd,
Gaussian4D& feature_nondetect,
Gaussian4D*& features_update)
{
// predicted measurement
REAL dx = feature_predict.mean[0] - pose.px ;
REAL dy = feature_predict.mean[1] - pose.py ;
REAL r2 = dx*dx + dy*dy ;
REAL r = sqrt(r2) ;
REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// probability of detection
feature_pd = 0 ;
if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
feature_pd = dev_config.pd ;
// write non-detection term
copy_gaussians(feature_predict,feature_nondetect) ;
feature_nondetect.weight = feature_predict.weight*(1-feature_pd) ;
// measurement jacobian wrt feature
REAL J[4] ;
J[0] = dx/r ;
J[2] = dy/r ;
J[1] = -dy/r2 ;
J[3] = dx/r2 ;
REAL* P = feature_predict.cov ;
// BEGIN Maple-Generated expressions
// innovation covariance
REAL sigma[4] ;
REAL var_range = pow(dev_config.stdRange,2) ;
REAL var_bearing = pow(dev_config.stdBearing,2) ;
sigma[0] = J[0] * (P[0] * J[0] + P[4] * J[2]) + J[2] * (P[1] * J[0] + P[5] * J[2]) + var_range;
sigma[1] = J[1] * (P[0] * J[0] + P[4] * J[2]) + J[3] * (P[1] * J[0] + P[5] * J[2]);
sigma[2] = J[0] * (P[0] * J[1] + P[4] * J[3]) + J[2] * (P[1] * J[1] + P[5] * J[3]);
sigma[3] = J[1] * (P[0] * J[1] + P[4] * J[3]) + J[3] * (P[1] * J[1] + P[5] * J[3]) + var_bearing;
// enforce symmetry
sigma[1] = (sigma[1]+sigma[2])/2 ;
sigma[2] = sigma[1] ;
// makePositiveDefinite(sigma) ;
REAL det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
REAL S[4] ;
S[0] = sigma[3]/(det_sigma) ;
S[1] = -sigma[1]/(det_sigma) ;
S[2] = -sigma[2]/(det_sigma) ;
S[3] = sigma[0]/(det_sigma) ;
// Kalman gain
REAL K[8] ;
K[0] = P[0] * (J[0] * S[0] + J[1] * S[1])
+ P[4] * (J[2] * S[0] + J[3] * S[1]);
K[1] = P[1] * (J[0] * S[0] + J[1] * S[1])
+ P[5] * (J[2] * S[0] + J[3] * S[1]);
K[2] = P[2] * (J[0] * S[0] + J[1] * S[1])
+ P[6] * (J[2] * S[0] + J[3] * S[1]);
K[3] = P[3] * (J[0] * S[0] + J[1] * S[1])
+ P[7] * (J[2] * S[0] + J[3] * S[1]);
K[4] = P[0] * (J[0] * S[2] + J[1] * S[3])
+ P[4] * (J[2] * S[2] + J[3] * S[3]);
K[5] = P[1] * (J[0] * S[2] + J[1] * S[3])
+ P[5] * (J[2] * S[2] + J[3] * S[3]);
K[6] = P[2] * (J[0] * S[2] + J[1] * S[3])
+ P[6] * (J[2] * S[2] + J[3] * S[3]);
K[7] = P[3] * (J[0] * S[2] + J[1] * S[3])
+ P[7] * (J[2] * S[2] + J[3] * S[3]);
// Updated covariance (Joseph Form)
REAL cov_update[16] ;
cov_update[0] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + var_range * pow( K[0], 2) + var_bearing * pow( K[4], 2);
cov_update[1] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
cov_update[2] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[2] * (1 - K[0] * J[0] - K[4] * J[1]) + P[6] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
cov_update[3] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[3] * (1 - K[0] * J[0] - K[4] * J[1]) + P[7] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
cov_update[4] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
cov_update[5] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + var_range * pow( K[1], 2) + var_bearing * pow( K[5], 2);
cov_update[6] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[2] * (-K[1] * J[0] - K[5] * J[1]) + P[6] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
cov_update[7] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[3] * (-K[1] * J[0] - K[5] * J[1]) + P[7] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
cov_update[8] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
cov_update[9] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
cov_update[10] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[2] * (-K[2] * J[0] - K[6] * J[1]) + P[6] * (-K[2] * J[2] - K[6] * J[3]) + P[10] + var_range * pow( K[2], 2) + var_bearing * pow( K[6], 2);
cov_update[11] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[3] * (-K[2] * J[0] - K[6] * J[1]) + P[7] * (-K[2] * J[2] - K[6] * J[3]) + P[11] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
cov_update[12] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
cov_update[13] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
cov_update[14] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[2] * (-K[3] * J[0] - K[7] * J[1]) + P[6] * (-K[3] * J[2] - K[7] * J[3]) + P[14] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
cov_update[15] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[3] * (-K[3] * J[0] - K[7] * J[1]) + P[7] * (-K[3] * J[2] - K[7] * J[3]) + P[15] + var_range * pow( K[3], 2) + var_bearing * pow( K[7], 2);
REAL innov[2] ;
REAL dist = 0 ;
for ( int m = 0 ; m < n_measure ; m++ )
{
int idx = m*n_features ;
innov[0] = Z[m].range - r ;
innov[1] = wrapAngle(Z[m].bearing - bearing) ;
features_update[idx].mean[0] = feature_predict.mean[0] + K[0]*innov[0] + K[4]*innov[1] ;
features_update[idx].mean[1] = feature_predict.mean[1] + K[1]*innov[0] + K[5]*innov[1] ;
features_update[idx].mean[2] = feature_predict.mean[2] + K[2]*innov[0] + K[6]*innov[1] ;
features_update[idx].mean[3] = feature_predict.mean[3] + K[3]*innov[0] + K[7]*innov[1] ;
for ( int i = 0 ; i < 16 ; i++ )
features_update[idx].cov[i] = cov_update[i] ;
// compute single object likelihood
dist = innov[0]*innov[0]*S[0] +
innov[0]*innov[1]*(S[1] + S[2]) +
innov[1]*innov[1]*S[3] ;
if(Z[m].label==DYNAMIC_MEASUREMENT || !dev_config.labeledMeasurements)
{
// partially update weight (log-transformed)
features_update[idx].weight = safeLog(feature_pd)
+ safeLog(feature_predict.weight)
- 0.5*dist
- safeLog(2*M_PI)
- 0.5*safeLog(det_sigma) ;
}
else
{
features_update[idx].weight = safeLog(0) ;
}
}
}
///// computes various components for the Kalman update of a particular feature
///*!
// * Given a vehicle pose and feature Gaussian, the function computes the Kalman
// * gain, updated covariance, innovation covariance, determinant of the
// * innovation covariance, probability of detection, and predicted measurement.
// * The computed values are stored at the addresses referenced by the passed
// * pointers.
// *
// * This code is specific to XY-heading vehicle state with range-bearing
// * measurements to XY point features.
// \param pose vehicle pose
// \param feature feature gaussian
// \param K pointer to store Kalman gain matrix
// \param cov_update pointer to store updated covariance matrix
// \param det_sigma pointer to store determinant of innov. covariance
// \param S pointer to store innov. covariance matrix.
// \param feature_pd pointer to store feature probability of detect.
// \param z_predict pointer to store predicted measurement
// */
//__device__ void
//computePreUpdateComponents( ConstantVelocityState pose,
// Gaussian2D feature, REAL* K,
// REAL* cov_update, REAL* det_sigma,
// REAL* S, REAL* feature_pd,
// RangeBearingMeasurement* z_predict )
//{
// // predicted measurement
// REAL dx = feature.mean[0] - pose.px ;
// REAL dy = feature.mean[1] - pose.py ;
// REAL r2 = dx*dx + dy*dy ;
// REAL r = sqrt(r2) ;
// REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// z_predict->range = r ;
// z_predict->bearing = bearing ;
// // probability of detection
// if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
// *feature_pd = dev_config.pd ;
// else
// *feature_pd = 0 ;
// // measurement jacobian wrt feature
// REAL J[4] ;
// J[0] = dx/r ;
// J[2] = dy/r ;
// J[1] = -dy/r2 ;
// J[3] = dx/r2 ;
// // predicted feature covariance
// REAL* P = feature.cov ;
// // BEGIN Maple-Generated expressions
// // innovation covariance
// REAL sigma[4] ;
// sigma[0] = (P[0] * J[0] + J[2] * P[1]) * J[0] + (J[0] * P[2] + P[3] * J[2]) * J[2] + pow(dev_config.stdRange,2) ;
// sigma[1] = (P[0] * J[1] + J[3] * P[1]) * J[0] + (J[1] * P[2] + P[3] * J[3]) * J[2];
// sigma[2] = (P[0] * J[0] + J[2] * P[1]) * J[1] + (J[0] * P[2] + P[3] * J[2]) * J[3];
// sigma[3] = (P[0] * J[1] + J[3] * P[1]) * J[1] + (J[1] * P[2] + P[3] * J[3]) * J[3] + pow(dev_config.stdBearing,2) ;
// // enforce symmetry
// sigma[1] = (sigma[1]+sigma[2])/2 ;
// sigma[2] = sigma[1] ;
//// makePositiveDefinite(sigma) ;
// *det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
// S[0] = sigma[3]/(*det_sigma) ;
// S[1] = -sigma[1]/(*det_sigma) ;
// S[2] = -sigma[2]/(*det_sigma) ;
// S[3] = sigma[0]/(*det_sigma) ;
// // Kalman gain
// K[0] = S[0]*(P[0]*J[0] + P[2]*J[2]) + S[1]*(P[0]*J[1] + P[2]*J[3]) ;
// K[1] = S[0]*(P[1]*J[0] + P[3]*J[2]) + S[1]*(P[1]*J[1] + P[3]*J[3]) ;
// K[2] = S[2]*(P[0]*J[0] + P[2]*J[2]) + S[3]*(P[0]*J[1] + P[2]*J[3]) ;
// K[3] = S[2]*(P[1]*J[0] + P[3]*J[2]) + S[3]*(P[1]*J[1] + P[3]*J[3]) ;
// // Updated covariance (Joseph Form)
// cov_update[0] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + pow(K[0], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[2], 2) * dev_config.stdBearing*dev_config.stdBearing;
// cov_update[2] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
// cov_update[1] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
// cov_update[3] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + pow(K[1], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[3], 2) * dev_config.stdBearing*dev_config.stdBearing;
//}
//__device__ void
//computePreUpdateComponentsDynamic( ConstantVelocityState pose,
// Gaussian4D feature, REAL* K,
// REAL* cov_update, REAL* det_sigma,
// REAL* S, REAL* feature_pd,
// RangeBearingMeasurement* z_predict )
//{
// // predicted measurement
// REAL dx = feature.mean[0] - pose.px ;
// REAL dy = feature.mean[1] - pose.py ;
// REAL r2 = dx*dx + dy*dy ;
// REAL r = sqrt(r2) ;
// REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// z_predict->range = r ;
// z_predict->bearing = bearing ;
// // probability of detection
// if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
// *feature_pd = dev_config.pd ;
// else
// *feature_pd = 0 ;
// // measurement jacobian wrt feature
// REAL J[4] ;
// J[0] = dx/r ;
// J[2] = dy/r ;
// J[1] = -dy/r2 ;
// J[3] = dx/r2 ;
// // predicted feature covariance
// REAL* P = feature.cov ;
// // BEGIN Maple-Generated expressions
// // innovation covariance
// REAL sigma[4] ;
// REAL var_range = pow(dev_config.stdRange,2) ;
// REAL var_bearing = pow(dev_config.stdBearing,2) ;
// sigma[0] = J[0] * (P[0] * J[0] + P[4] * J[2]) + J[2] * (P[1] * J[0] + P[5] * J[2]) + var_range;
// sigma[1] = J[1] * (P[0] * J[0] + P[4] * J[2]) + J[3] * (P[1] * J[0] + P[5] * J[2]);
// sigma[2] = J[0] * (P[0] * J[1] + P[4] * J[3]) + J[2] * (P[1] * J[1] + P[5] * J[3]);
// sigma[3] = J[1] * (P[0] * J[1] + P[4] * J[3]) + J[3] * (P[1] * J[1] + P[5] * J[3]) + var_bearing;
// // enforce symmetry
// sigma[1] = (sigma[1]+sigma[2])/2 ;
// sigma[2] = sigma[1] ;
//// makePositiveDefinite(sigma) ;
// *det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
// S[0] = sigma[3]/(*det_sigma) ;
// S[1] = -sigma[1]/(*det_sigma) ;
// S[2] = -sigma[2]/(*det_sigma) ;
// S[3] = sigma[0]/(*det_sigma) ;
// // Kalman gain
// K[0] = P[0] * (J[0] * S[0] + J[1] * S[1])
// + P[4] * (J[2] * S[0] + J[3] * S[1]);
// K[1] = P[1] * (J[0] * S[0] + J[1] * S[1])
// + P[5] * (J[2] * S[0] + J[3] * S[1]);
// K[2] = P[2] * (J[0] * S[0] + J[1] * S[1])
// + P[6] * (J[2] * S[0] + J[3] * S[1]);
// K[3] = P[3] * (J[0] * S[0] + J[1] * S[1])
// + P[7] * (J[2] * S[0] + J[3] * S[1]);
// K[4] = P[0] * (J[0] * S[2] + J[1] * S[3])
// + P[4] * (J[2] * S[2] + J[3] * S[3]);
// K[5] = P[1] * (J[0] * S[2] + J[1] * S[3])
// + P[5] * (J[2] * S[2] + J[3] * S[3]);
// K[6] = P[2] * (J[0] * S[2] + J[1] * S[3])
// + P[6] * (J[2] * S[2] + J[3] * S[3]);
// K[7] = P[3] * (J[0] * S[2] + J[1] * S[3])
// + P[7] * (J[2] * S[2] + J[3] * S[3]);
// // Updated covariance (Joseph Form)
// cov_update[0] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + var_range * pow( K[0], 2) + var_bearing * pow( K[4], 2);
// cov_update[1] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
// cov_update[2] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[2] * (1 - K[0] * J[0] - K[4] * J[1]) + P[6] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
// cov_update[3] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[3] * (1 - K[0] * J[0] - K[4] * J[1]) + P[7] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
// cov_update[4] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
// cov_update[5] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + var_range * pow( K[1], 2) + var_bearing * pow( K[5], 2);
// cov_update[6] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[2] * (-K[1] * J[0] - K[5] * J[1]) + P[6] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
// cov_update[7] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[3] * (-K[1] * J[0] - K[5] * J[1]) + P[7] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
// cov_update[8] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
// cov_update[9] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
// cov_update[10] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[2] * (-K[2] * J[0] - K[6] * J[1]) + P[6] * (-K[2] * J[2] - K[6] * J[3]) + P[10] + var_range * pow( K[2], 2) + var_bearing * pow( K[6], 2);
// cov_update[11] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[3] * (-K[2] * J[0] - K[6] * J[1]) + P[7] * (-K[2] * J[2] - K[6] * J[3]) + P[11] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
// cov_update[12] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
// cov_update[13] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
// cov_update[14] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[2] * (-K[3] * J[0] - K[7] * J[1]) + P[6] * (-K[3] * J[2] - K[7] * J[3]) + P[14] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
// cov_update[15] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[3] * (-K[3] * J[0] - K[7] * J[1]) + P[7] * (-K[3] * J[2] - K[7] * J[3]) + P[15] + var_range * pow( K[3], 2) + var_bearing * pow( K[7], 2);
//}
///// kernel for computing various constants used in the CPHD filter
//__global__ void
//cphdConstantsKernel( REAL* dev_factorial, REAL* dev_C, REAL* dev_cn_clutter )
//{
// int n = threadIdx.x ;
// int k = blockIdx.x ;
// REAL* factorial = (REAL*)shmem ;
// factorial[n] = dev_factorial[n] ;
// __syncthreads() ;
// // compute the log binomial coefficients (nchoosek)
// int stride = dev_config.maxCardinality + 1 ;
// int idx = k*stride + n ;
// REAL log_nchoosek = 0 ;
// if ( k == 0 )
// {
// log_nchoosek = 0 ;
// }
// else if ( n == 0 || k > n )
// {
// log_nchoosek = LOG0 ;
// }
// else
// {
// log_nchoosek = factorial[n] - factorial[k]
// - factorial[n-k] ;
// }
// dev_C[idx] = log_nchoosek ;
// // thread block 0 computes the clutter cardinality
// if ( k == 0 )
// {
// dev_cn_clutter[n] = n*safeLog(dev_config.clutterRate)
// - dev_config.clutterRate
// - factorial[n] ;
// }
//// // for debugging: clutter cardinality with constant number of clutter
//// if ( k== 0 )
//// {
//// if ( n == dev_config.clutterRate)
//// dev_cn_clutter[n] = 0 ;
//// else
//// dev_cn_clutter[n] = LOG0 ;
//// }
//}
///// host-side helper function to call cphdConstantsKernel
//void
//initCphdConstants()
//{
// log_factorials = (REAL*)malloc( (config.maxCardinality+1)*sizeof(REAL) ) ;
// log_factorials[0] = 0 ;
// for ( int n = 1 ; n <= config.maxCardinality ; n++ )
// {
// log_factorials[n] = log_factorials[n-1] + safeLog((REAL)n) ;
// }
// CUDA_SAFE_CALL( hipMalloc( (void**)&dev_C,
// pow(config.maxCardinality+1,2)*sizeof(REAL) ) ) ;
// CUDA_SAFE_CALL( hipMalloc( (void**)&dev_factorial,
// (config.maxCardinality+1)*sizeof(REAL) ) ) ;
// CUDA_SAFE_CALL( hipMalloc( (void**)&dev_cn_clutter,
// (config.maxCardinality+1)*sizeof(REAL) ) ) ;
// CUDA_SAFE_CALL( hipMemcpy( dev_factorial, &log_factorials[0],
// (config.maxCardinality+1)*sizeof(REAL),
// hipMemcpyHostToDevice ) ) ;
// CUDA_SAFE_THREAD_SYNC() ;
//// CUDA_SAFE_CALL(
//// hipMalloc( (void**)&dev_pspower,
//// (config.maxCardinality+1)*sizeof(REAL) ) ) ;
//// CUDA_SAFE_CALL(
//// hipMalloc( (void**)&dev_qspower,
//// (config.maxCardinality+1)*sizeof(REAL) ) ) ;
// int n_blocks = config.maxCardinality+1 ;
// int n_threads = n_blocks ;
// cphdConstantsKernel<<<n_blocks, n_threads, n_threads*sizeof(REAL)>>>
// ( dev_factorial, dev_C, dev_cn_clutter ) ;
// CUDA_SAFE_THREAD_SYNC() ;
//}
/// kernel for particle prediction with an ackerman steering motion model
__global__ void
phdPredictKernelAckerman(ConstantVelocityState* particles_prior,
AckermanControl control,
AckermanNoise* noise,
ConstantVelocityState* particles_predict,
int n_predict)
{
const int tid = threadIdx.x ;
const int predict_idx = blockIdx.x*blockDim.x + tid ;
if (predict_idx < n_predict)
{
// get the prior state from which this prediction is generated
const int prior_idx = floor((float)predict_idx/dev_config.nPredictParticles) ;
ConstantVelocityState oldState = particles_prior[prior_idx] ;
// use the motion model to compute the prediction
ConstantVelocityState newState ;
REAL ve_noisy = control.v_encoder + noise[predict_idx].n_encoder ;
REAL alpha_noisy = control.alpha + noise[predict_idx].n_alpha ;
REAL vc = ve_noisy/(1-tan(alpha_noisy)*dev_config.h/dev_config.l) ;
REAL xc_dot = vc*cos(oldState.ptheta) ;
REAL yc_dot = vc*sin(oldState.ptheta) ;
REAL thetac_dot = vc*tan(alpha_noisy)/dev_config.l ;
REAL dt = dev_config.dt/dev_config.subdividePredict ;
newState.px = oldState.px +
dt*(xc_dot -
thetac_dot*( dev_config.a*sin(oldState.ptheta) + dev_config.b*cos(oldState.ptheta) )
) ;
newState.py = oldState.py +
dt*(yc_dot +
thetac_dot*( dev_config.a*cos(oldState.ptheta) - dev_config.b*sin(oldState.ptheta) )
) ;
newState.ptheta = wrapAngle(oldState.ptheta + dt*thetac_dot) ;
newState.vx = 0 ;
newState.vy = 0 ;
newState.vtheta = 0 ;
// save predicted state to memory
particles_predict[predict_idx] = newState ;
}
}
__global__ void
phdPredictKernel(ConstantVelocityState* particles_prior,
ConstantVelocityNoise* noise, ConstantVelocityState* particles_predict,
int n_predict)
{
const int tid = threadIdx.x ;
const int predict_idx = blockIdx.x*blockDim.x + tid ;
if (predict_idx < n_predict)
{
const int prior_idx = floor((float)predict_idx/dev_config.nPredictParticles) ;
ConstantVelocityState oldState = particles_prior[prior_idx] ;
ConstantVelocityState newState ;
REAL dt = dev_config.dt/dev_config.subdividePredict ;
// typename modelType::stateType newState = mm(particles[particleIdx],*control,noise[particleIdx]) ;
newState.px = oldState.px +
dt*(oldState.vx*cos(oldState.ptheta) -
oldState.vy*sin(oldState.ptheta))+
dt*dt*0.5*(noise[predict_idx].ax*cos(oldState.ptheta) -
noise[predict_idx].ay*sin(oldState.ptheta)) ;
newState.py = oldState.py +
dt*(oldState.vx*sin(oldState.ptheta) +
oldState.vy*cos(oldState.ptheta)) +
dt*dt*0.5*(noise[predict_idx].ax*sin(oldState.ptheta) +
noise[predict_idx].ay*cos(oldState.ptheta)) ;
newState.ptheta = wrapAngle(oldState.ptheta +
dt*oldState.vtheta +
0.5*dt*dt*noise[predict_idx].atheta) ;
newState.vx = oldState.vx + dt*noise[predict_idx].ax ;
newState.vy = oldState.vy + dt*noise[predict_idx].ay ;
newState.vtheta = oldState.vtheta + dt*noise[predict_idx].atheta ;
particles_predict[predict_idx] = newState ;
}
}
/// predict the cardinality distribution for the CPHD filter
/**
Each thread block processes the cardinality for a single particle. Each thread
inside the block computes the predicted cardinality for a particular value of
n.
*/
__global__ void
cardinalityPredictKernel( REAL* cn_prior, REAL* cn_births, REAL* dev_C,
REAL* cn_predict )
{
int n = threadIdx.x ;
int cn_offset = blockIdx.x * (dev_config.maxCardinality+1) ;
REAL* cn_prior_shared = (REAL*)shmem ;
// load the prior cardinality into shared mem
cn_prior_shared[n] = cn_prior[cn_offset+n] ;
__syncthreads() ;
REAL outersum = 0 ;
for ( int j = 0 ; j <= n ; j++ )
{
outersum += exp(cn_births[n-j]+cn_prior_shared[j]) ;
}
if ( outersum != 0)
cn_predict[cn_offset+n] = safeLog(outersum) ;
else
cn_predict[cn_offset+n] = LOG0 ;
}
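/// illustrative host-side launch sketch for cardinalityPredictKernel (an
/// assumption added for clarity, not part of the original host code): one
/// thread block per particle, one thread per cardinality value, and dynamic
/// shared memory large enough for one particle's prior cardinality
/// distribution. dev_cn_prior and dev_cn_predict are assumed device arrays of
/// n_particles*(config.maxCardinality+1) log-weights, dev_cn_births holds
/// (config.maxCardinality+1) birth log-weights, and dev_C is assumed to have
/// been allocated already (e.g. by initCphdConstants).
void
cardinalityPredictLaunchSketch( REAL* dev_cn_prior, REAL* dev_cn_births,
REAL* dev_cn_predict, int n_particles )
{
int n_threads = config.maxCardinality + 1 ;
size_t shmem_size = n_threads*sizeof(REAL) ;
hipLaunchKernelGGL( cardinalityPredictKernel,
dim3(n_particles), dim3(n_threads), shmem_size, 0,
dev_cn_prior, dev_cn_births, dev_C, dev_cn_predict ) ;
}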
/// compute the predicted states of every feature
template<class GaussianType, class MotionModelType>
__global__ void
predictMapKernel(GaussianType* features_prior, MotionModelType model,
int n_features, GaussianType* features_predict)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
// loop over all features
for (int j = 0 ; j < n_features ; j+=blockDim.x*gridDim.x)
{
int idx = j+tid ;
if ( idx < n_features )
{
features_predict[idx] = model.compute_prediction(features_prior[idx],
dev_config.dt) ;
}
}
}
__global__ void
predictMapKernelMixed(Gaussian4D* features_prior,
ConstantVelocityMotionModel model,
int n_features, Gaussian4D* features_predict,
Gaussian2D* features_jump)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
// loop over all features
for (int j = 0 ; j < n_features ; j+=blockDim.x*gridDim.x)
{
int idx = j+tid ;
if ( idx < n_features )
{
REAL vx = features_prior[idx].mean[2] ;
REAL vy = features_prior[idx].mean[3] ;
REAL v_mag = sqrt(vx*vx + vy*vy) ;
REAL sigmoid_v = 1/(1+exp(dev_config.beta*(dev_config.tau - v_mag))) ;
// default to the purely dynamic case so the weight updates below are
// well-defined even if dev_config.featureModel matches neither branch
REAL p_jmm = 1 ;
REAL ps = dev_config.ps ;
REAL scale_x = 1 ;
REAL scale_y = 1 ;
if (dev_config.featureModel==DYNAMIC_MODEL)
{
p_jmm = 1 ;
ps = logistic_function(v_mag,0,1-dev_config.ps,dev_config.beta,
dev_config.tau) ;
ps = 1-ps ;
scale_x = logistic_function(vx,0,1,dev_config.beta,
dev_config.tau) ;
scale_y = logistic_function(vy,0,1,dev_config.beta,
dev_config.tau) ;
}
else if(dev_config.featureModel==MIXED_MODEL)
{
p_jmm = sigmoid_v ;
ps = dev_config.ps ;
// p_jmm = 1 ;
}
features_predict[idx] = model.compute_prediction(features_prior[idx],
dev_config.dt,
scale_x,scale_y) ;
features_predict[idx].weight = p_jmm*ps
*features_predict[idx].weight ;
features_jump[idx].weight = (1-p_jmm)*features_prior[idx].weight ;
features_jump[idx].mean[0] = features_prior[idx].mean[0] ;
features_jump[idx].mean[1] = features_prior[idx].mean[1] ;
features_jump[idx].cov[0] = features_prior[idx].cov[0] ;
features_jump[idx].cov[1] = features_prior[idx].cov[1] ;
features_jump[idx].cov[2] = features_prior[idx].cov[4] ;
features_jump[idx].cov[3] = features_prior[idx].cov[5] ;
}
}
}
void
predictMapMixed(SynthSLAM& particles)
{
// combine all dynamic features into one vector
vector<Gaussian4D> all_features = combineFeatures(particles.maps_dynamic) ;
int n_features = all_features.size() ;
// allocate memory
Gaussian4D* dev_features_prior = NULL ;
Gaussian4D* dev_features_predict = NULL ;
Gaussian2D* dev_features_jump = NULL ;
CUDA_SAFE_CALL(hipMalloc((void**)&dev_features_prior,
n_features*sizeof(Gaussian4D) ) ) ;
CUDA_SAFE_CALL(hipMalloc((void**)&dev_features_predict,
n_features*sizeof(Gaussian4D) ) ) ;
CUDA_SAFE_CALL(hipMalloc((void**)&dev_features_jump,
n_features*sizeof(Gaussian2D) ) ) ;
CUDA_SAFE_CALL(hipMemcpy(dev_features_prior,&all_features[0],
n_features*sizeof(Gaussian4D),
hipMemcpyHostToDevice) ) ;
int n_blocks = (n_features+255)/256 ;
// configure the feature motion model
ConstantVelocityMotionModel motion_model ;
motion_model.std_accx = config.stdAxMap ;
motion_model.std_accy = config.stdAyMap ;
// launch the kernel
hipLaunchKernelGGL(( predictMapKernelMixed), dim3(n_blocks),dim3(256), 0, 0,
dev_features_prior,motion_model,n_features, dev_features_predict,
dev_features_jump ) ;
// copy results from device
vector<Gaussian2D> all_features_jump( all_features.size() ) ;
CUDA_SAFE_CALL(hipMemcpy(&all_features[0],dev_features_predict,
n_features*sizeof(Gaussian4D),
hipMemcpyDeviceToHost)) ;
CUDA_SAFE_CALL(hipMemcpy(&all_features_jump[0],dev_features_jump,
n_features*sizeof(Gaussian2D),
hipMemcpyDeviceToHost)) ;
// load predicted features back into particles
Gaussian4D* begin = &all_features[0] ;
Gaussian4D* end = begin
+ particles.maps_dynamic[0].size() ;
Gaussian2D* begin_jump = &all_features_jump[0] ;
Gaussian2D* end_jump = begin_jump
+ particles.maps_dynamic[0].size() ;
for ( int n = 0 ; n < particles.n_particles ; n++ )
{
particles.maps_dynamic[n].assign(begin,end) ;
// if(config.featureModel==MIXED_MODEL)
// {
// particles.maps_static[n].insert(particles.maps_static[n].end(),
// begin_jump,
// end_jump ) ;
// }
if ( n < particles.n_particles - 1)
{
begin = end ;
end += particles.maps_dynamic[n+1].size() ;
begin_jump = end_jump ;
end_jump += particles.maps_dynamic[n+1].size() ;
}
}
// free memory
CUDA_SAFE_CALL( hipFree( dev_features_prior ) ) ;
CUDA_SAFE_CALL( hipFree( dev_features_predict ) ) ;
CUDA_SAFE_CALL( hipFree( dev_features_jump ) ) ;
}
//template <class GaussianType>
//void
//predictMap(SynthSLAM& particles)
//{
// // combine all dynamic features into one vector
// vector<Gaussian4D> all_features = combineFeatures(particles.maps_dynamic) ;
// int n_features = all_features.size() ;
// GaussianType* dev_features_prior = NULL ;
// GaussianType* dev_features_predict = NULL ;
// CUDA_SAFE_CALL(hipMalloc((void**)&dev_features_prior,
// n_features*sizeof(Gaussian4D) ) ) ;
// CUDA_SAFE_CALL(hipMalloc((void**)&dev_features_predict,
// n_features*sizeof(Gaussian4D) ) ) ;
// CUDA_SAFE_CALL(hipMemcpy(dev_features_prior,&all_features[0],
// n_features*sizeof(Gaussian4D),
// hipMemcpyHostToDevice) ) ;
// int n_blocks = (n_features+255)/256 ;
// ConstantVelocityMotionModel motion_model ;
// motion_model.std_accx = config.stdAxMap ;
// motion_model.std_accy = config.stdAyMap ;
// predictMapKernel<<<n_blocks,256>>>
// (dev_features_prior,motion_model,n_features, dev_features_predict ) ;
// CUDA_SAFE_CALL(hipMemcpy(&all_features[0],dev_features_predict,
// n_features*sizeof(GaussianType),
// hipMemcpyDeviceToHost)) ;
// // load predicted features back into particles
// GaussianType* begin = &all_features[0] ;
// GaussianType* end = begin
// + particles.maps[0].size() ;
// for ( int n = 0 ; n < particles.n_particles ; n++ )
// {
// particles.maps_dynamic[n].assign(begin,end) ;
// if ( n < particles.n_particles - 1)
// {
// begin = end ;
// end += particles.maps_dynamic[n+1].size() ;
// }
// }
// CUDA_SAFE_CALL( hipFree( dev_features_prior ) ) ;
// CUDA_SAFE_CALL( hipFree( dev_features_predict ) ) ;
//}
/// host-side helper function for PHD filter prediction
void
phdPredict(SynthSLAM& particles, ... )
{
// start timer
hipEvent_t start, stop ;
hipEventCreate( &start ) ;
hipEventCreate( &stop ) ;
hipEventRecord( start,0 ) ;
int n_particles = particles.n_particles ;
int nPredict = n_particles*config.nPredictParticles ;
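// when nPredictParticles > 1, each prior particle spawns several predicted
// pose hypotheses; their maps and weights are duplicated further below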
// allocate device memory
ConstantVelocityState* dev_states_prior = NULL ;
ConstantVelocityState* dev_states_predict = NULL ;
CUDA_SAFE_CALL(
hipMalloc((void**)&dev_states_prior,
n_particles*sizeof(ConstantVelocityState) ) ) ;
CUDA_SAFE_CALL(
hipMalloc((void**)&dev_states_predict,
nPredict*sizeof(ConstantVelocityState) ) ) ;
// copy inputs
CUDA_SAFE_CALL(
hipMemcpy(dev_states_prior, &particles.states[0],
n_particles*sizeof(ConstantVelocityState),
hipMemcpyHostToDevice) ) ;
if ( config.motionType == CV_MOTION )
{
// generate random noise values
std::vector<ConstantVelocityNoise> noiseVector(nPredict) ;
for (unsigned int i = 0 ; i < nPredict ; i++ )
{
noiseVector[i].ax = 3*config.ax * randn() ;
noiseVector[i].ay = 3*config.ay * randn() ;
noiseVector[i].atheta = 3*config.ayaw * randn() ;
}
ConstantVelocityNoise* dev_noise = NULL ;
// allocate and copy one noise sample per predicted particle
// (nPredict entries, matching the size of noiseVector above)
CUDA_SAFE_CALL(
hipMalloc((void**)&dev_noise,
nPredict*sizeof(ConstantVelocityNoise) ) ) ;
CUDA_SAFE_CALL(
hipMemcpy(dev_noise, &noiseVector[0],
nPredict*sizeof(ConstantVelocityNoise),
hipMemcpyHostToDevice) ) ;
// launch the kernel
int nThreads = min(nPredict,256) ;
int nBlocks = (nPredict+255)/256 ;
hipLaunchKernelGGL(( phdPredictKernel)
, dim3(nBlocks), dim3(nThreads), 0, 0,
dev_states_prior,dev_noise,dev_states_predict,nPredict ) ;
hipFree(dev_noise) ;
}
else if( config.motionType == ACKERMAN_MOTION )
{
// read in the control data structure from variable argument list
va_list argptr ;
va_start(argptr,particles) ;
AckermanControl control = (AckermanControl)va_arg(argptr,AckermanControl) ;
va_end(argptr) ;
// generate random noise values
std::vector<AckermanNoise> noiseVector(nPredict) ;
for (unsigned int i = 0 ; i < nPredict ; i++ )
{
noiseVector[i].n_alpha = config.stdAlpha * randn() ;
noiseVector[i].n_encoder = config.stdEncoder * randn() ;
}
AckermanNoise* dev_noise = NULL ;
CUDA_SAFE_CALL(
hipMalloc((void**)&dev_noise,
nPredict*sizeof(AckermanNoise) ) ) ;
CUDA_SAFE_CALL(
hipMemcpy(dev_noise, &noiseVector[0],
nPredict*sizeof(AckermanNoise),
hipMemcpyHostToDevice) ) ;
// launch the kernel
int nThreads = min(nPredict,256) ;
int nBlocks = (nPredict+255)/256 ;
hipLaunchKernelGGL(( phdPredictKernelAckerman)
, dim3(nBlocks), dim3(nThreads), 0, 0,
dev_states_prior,control,dev_noise,dev_states_predict,nPredict) ;
hipFree(dev_noise) ;
}
// copy results from device
ConstantVelocityState* states_predict = (ConstantVelocityState*)
malloc(nPredict*sizeof(ConstantVelocityState)) ;
CUDA_SAFE_CALL(hipMemcpy(states_predict, dev_states_predict,
nPredict*sizeof(ConstantVelocityState),
hipMemcpyDeviceToHost) ) ;
particles.states.assign( states_predict, states_predict+nPredict ) ;
// duplicate the PHD filter maps and cardinalities for the newly spawned
// vehicle particles, and downscale particle weights
if ( config.nPredictParticles > 1 )
{
DEBUG_MSG("Duplicating maps") ;
vector<vector<Gaussian2D> > maps_predict_static ;
vector<vector<Gaussian4D> > maps_predict_dynamic ;
vector<REAL> weights_predict ;
vector< vector <REAL> > cardinalities_predict ;
vector<int> resample_idx_predict ;
// maps_predict_static.clear();
// maps_predict_static.reserve(nPredict);
// maps_predict_dynamic.clear();
// maps_predict_dynamic.reserve(nPredict);
// weights_predict.clear();
// weights_predict.reserve(nPredict);
// cardinalities_predict.clear();
// cardinalities_predict.reserve(nPredict);
// resample_idx_predict.reserve(nPredict);
for ( int i = 0 ; i < n_particles ; i++ )
{
maps_predict_static.insert( maps_predict_static.end(),
config.nPredictParticles,
particles.maps_static[i] ) ;
maps_predict_dynamic.insert( maps_predict_dynamic.end(),
config.nPredictParticles,
particles.maps_dynamic[i] ) ;
cardinalities_predict.insert( cardinalities_predict.end(),
config.nPredictParticles,
particles.cardinalities[i] ) ;
float new_weight = particles.weights[i] - safeLog(config.nPredictParticles) ;
// DEBUG_VAL(new_weight) ;
weights_predict.insert( weights_predict.end(), config.nPredictParticles,
new_weight ) ;
resample_idx_predict.insert(resample_idx_predict.end(),
config.nPredictParticles,
particles.resample_idx[i]) ;
}
// DEBUG_VAL(maps_predict.size()) ;
DEBUG_MSG("saving duplicated maps") ;
DEBUG_MSG("static") ;
particles.maps_static = maps_predict_static ;
DEBUG_MSG("dynamic") ;
particles.maps_dynamic = maps_predict_dynamic ;
DEBUG_MSG("weights") ;
particles.weights = weights_predict ;
DEBUG_MSG("cardinalities") ;
particles.cardinalities = cardinalities_predict ;
particles.resample_idx = resample_idx_predict ;
particles.n_particles = nPredict ;
}
// map prediction
if(config.featureModel==DYNAMIC_MODEL || config.featureModel==MIXED_MODEL)
predictMapMixed(particles) ;
// log time
hipEventRecord( stop,0 ) ;
hipEventSynchronize( stop ) ;
float elapsed ;
hipEventElapsedTime( &elapsed, start, stop ) ;
fstream predictTimeFile( "predicttime.log", fstream::out|fstream::app ) ;
predictTimeFile << elapsed << endl ;
predictTimeFile.close() ;
// clean up
CUDA_SAFE_CALL( hipFree( dev_states_prior ) ) ;
CUDA_SAFE_CALL( hipFree( dev_states_predict ) ) ;
free(states_predict) ;
}
/// determine which features are in range
/*!
* Each thread block handles a single particle. The threads in the block
* evaluate the range and bearing of [blockDim] features in parallel, looping
* through all of the particle's features.
\param predictedFeatures Features from all particles concatenated into a
single array
\param map_sizes_static Number of features in each particle, so that the function
knows where the boundaries are in predictedFeatures
\param n_particles Total number of particles
\param poses Array of particle poses
\param inRange Pointer to a char array that is filled by the function.
For each feature in predictedFeatures that is in range of its
respective particle, the corresponding entry in this array is set to
1; features that are only nearly in range are marked with 2, and all
others with 0.
\param n_in_range Pointer to integer array that is filled by the function.
Should be allocated to have [n_particles] elements. Each entry
represents the number of in-range features for each particle.
\param n_nearly_in_range Pointer to integer array filled by the function,
also with [n_particles] elements, giving the number of nearly-in-range
features for each particle.
*/
template<class GaussianType>
__global__ void
computeInRangeKernel( GaussianType *predictedFeatures, int* map_sizes_static,
int n_particles, ConstantVelocityState* poses,
char* inRange, int* n_in_range, int* n_nearly_in_range )
{
int tid = threadIdx.x ;
// number of predicted features belonging to the particle handled by this block
int n_featuresBlock ;
// number of inrange features in the particle
__shared__ int nInRangeBlock ;
__shared__ int n_nearly_in_range_block ;
// vehicle pose of the thread block
ConstantVelocityState blockPose ;
GaussianType feature ;
for ( int p = 0 ; p < n_particles ; p += gridDim.x )
{
if ( p + blockIdx.x < n_particles )
{
int predict_offset = 0 ;
// compute the indexing offset for this particle
int map_idx = p + blockIdx.x ;
for ( int i = 0 ; i < map_idx ; i++ )
predict_offset += map_sizes_static[i] ;
// particle-wide values
if ( tid == 0 )
{
nInRangeBlock = 0 ;
n_nearly_in_range_block = 0 ;
}
blockPose = poses[map_idx] ;
n_featuresBlock = map_sizes_static[map_idx] ;
__syncthreads() ;
// loop through features
for ( int i = 0 ; i < n_featuresBlock ; i += blockDim.x )
{
if ( tid+i < n_featuresBlock )
{
// index of thread feature
int featureIdx = predict_offset + tid + i ;
feature = predictedFeatures[featureIdx] ;
// default value
inRange[featureIdx] = 0 ;
// compute the predicted measurement
REAL dx = feature.mean[0] - blockPose.px ;
REAL dy = feature.mean[1] - blockPose.py ;
REAL r2 = dx*dx + dy*dy ;
REAL r = sqrt(r2) ;
REAL bearing = wrapAngle(atan2f(dy,dx) - blockPose.ptheta) ;
if ( r >= dev_config.minRange &&
r <= dev_config.maxRange &&
fabs(bearing) <= dev_config.maxBearing )
{
atomicAdd( &nInRangeBlock, 1 ) ;
inRange[featureIdx] = 1 ;
}
else if ( r >= 0.8*dev_config.minRange &&
r <= 1.2*dev_config.maxRange &&
fabs(bearing) <= 1.2*dev_config.maxBearing )
{
inRange[featureIdx] = 2 ;
atomicAdd( &n_nearly_in_range_block, 1 ) ;
}
}
}
// store nInrange
__syncthreads() ;
if ( tid == 0 )
{
n_in_range[map_idx] = nInRangeBlock ;
n_nearly_in_range[map_idx] = n_nearly_in_range_block ;
}
}
}
}
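// NOTE: the commented-out kernels below (separateCardinality, cphdPreUpdate,
// computeEsf, computePsi, cphdUpdate) belong to a disabled CPHD update
// pipeline and are kept for reference only.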
///// generates a binomial Poisson cardinality distribution for the in-range features.
//__global__ void
//separateCardinalityKernel( Gaussian2D *features, int* map_offsets,
// REAL* cn_inrange)
//{
// int n = threadIdx.x ;
// int map_idx = blockIdx.x ;
// int n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
// int feature_idx = map_offsets[map_idx] + n ;
// REAL* cn_shared = (REAL*)shmem ;
// REAL* weights = (REAL*)&cn_shared[dev_config.maxCardinality+1] ;
// // compute product of weights
// REAL val = 0 ;
// if ( n < n_features )
// {
// val = log(features[ feature_idx ].weight) ;
// }
// sumByReduction( weights, val, n ) ;
// REAL log_alpha = weights[0] ;
// __syncthreads() ;
// // load the polynomial roots into shared memory
// if ( n < n_features )
// {
// weights[n] = (1-features[feature_idx].weight)/features[feature_idx].weight ;
// }
// else
// {
// weights[n] = 0 ;
// }
// // compute full cn using recursive algorithm
// cn_shared[n+1] = 0 ;
// int cn_offset = map_idx*(dev_config.maxCardinality+1) ;
// if ( n == 0 )
// {
// cn_shared[0] = 1 ;
// }
// __syncthreads() ;
// for ( int m = 0 ; m < n_features ; m++ )
// {
// REAL tmp1 = cn_shared[n+1] ;
// REAL tmp2 = cn_shared[n] ;
// __syncthreads() ;
// if ( n < m+1 )
// cn_shared[n+1] = tmp1 - weights[m]*tmp2 ;
// __syncthreads() ;
// }
// if ( n <= n_features )
// {
// int idx = cn_offset + (n_features - n) ;
// cn_inrange[idx] = safeLog(fabs(cn_shared[n]))
// + log_alpha ;
// }
// else
// {
// cn_inrange[cn_offset+n] = LOG0 ;
// }
//}
///// compute partially updated weights and updated means & covariances
///**
// \param features Array of all Gaussians from all particles concatenated together
// \param map_sizes Integer array indicating the number of features per particle.
// \param n_particles Number of particles
// \param n_measurements Number of measurements
// \param poses Array of particle poses
// \param w_partial Array of partially updated weights computed by kernel
// */
//__global__ void
//cphdPreUpdateKernel(Gaussian2D *features, int* map_offsets,
// int n_particles, int n_measurements, ConstantVelocityState* poses,
// Gaussian2D* updated_features, REAL* w_partial, REAL* qdw )
//{
// int tid = threadIdx.x + blockIdx.x*blockDim.x ;
// int n_total = (n_measurements+1)*map_offsets[n_particles] ;
// if ( tid >= n_total)
// return ;
// int map_idx = 0 ;
// while ( map_offsets[map_idx]*(n_measurements+1) <= tid )
// {
// map_idx++ ;
// }
// map_idx-- ;
// int n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
// int offset = map_offsets[map_idx]*(n_measurements+1) ;
// int feature_idx = floor( (float)(tid-offset)/(n_measurements) ) ;
// if ( feature_idx >= n_features ) // non-detect thread
// {
// int predict_idx = tid - n_features*n_measurements - offset
// + map_offsets[map_idx] ;
// updated_features[tid] = features[predict_idx] ;
// }
// else if ( tid < n_total ) // update thread
// {
// int z_idx = tid - feature_idx*n_measurements - offset ;
// Gaussian2D feature = features[map_offsets[map_idx]+feature_idx] ;
// Gaussian2D updated_feature ;
// RangeBearingMeasurement z = Z[z_idx] ;
// RangeBearingMeasurement z_predict ;
// ConstantVelocityState pose = poses[map_idx] ;
// REAL K[4] = {0,0,0,0} ;
// REAL sigmaInv[4] = {0,0,0,0} ;
// REAL covUpdate[4] = {0,0,0,0} ;
// REAL featurePd = 0 ;
// REAL detSigma = 0 ;
// computePreUpdateComponents( pose, feature, K, covUpdate,
// &detSigma, sigmaInv, &featurePd,
// &z_predict ) ;
// // innovation
// REAL innov[2] = {0,0} ;
// innov[0] = z.range - z_predict.range ;
// innov[1] = wrapAngle(z.bearing - z_predict.bearing) ;
// // updated mean
// updated_feature.mean[0] = feature.mean[0] + K[0]*innov[0] + K[2]*innov[1] ;
// updated_feature.mean[1] = feature.mean[1] + K[1]*innov[0] + K[3]*innov[1] ;
// // updated covariances
// updated_feature.cov[0] = covUpdate[0] ;
// updated_feature.cov[1] = covUpdate[1] ;
// updated_feature.cov[2] = covUpdate[2] ;
// updated_feature.cov[3] = covUpdate[3] ;
// // single-object likelihood
// REAL dist = innov[0]*innov[0]*sigmaInv[0] +
// innov[0]*innov[1]*(sigmaInv[1] + sigmaInv[2]) +
// innov[1]*innov[1]*sigmaInv[3] ;
// // partially updated weight
// updated_feature.weight = safeLog(featurePd) + safeLog(feature.weight)
// - 0.5*dist- safeLog(2*M_PI) - 0.5*safeLog(detSigma) ;
// updated_features[tid] = updated_feature ;
// int w_idx = map_offsets[map_idx]*n_measurements ;
// w_idx += feature_idx*n_measurements + z_idx ;
// w_partial[w_idx] = updated_feature.weight ;
// if ( z_idx == 0 )
// {
// offset = map_offsets[map_idx] ;
// qdw[offset+feature_idx] = safeLog(1-featurePd) + safeLog(feature.weight) ;
// }
// }
//}
///// computes the elementary symmetric polynomial coefficients
///**
// This kernel produces the coefficients of the elementary symmetric function
// for the CPHD update
// \param w_partial Array of partially updated weights
// \param map_sizes Number of features per particle
// \param n_measurements Number of measurements
// \param esf Array of ESF coefficients computed by kernel
// \param esfd Array of ESF coefficients, with each measurement omitted
// */
//__global__ void
//computeEsfKernel( REAL* w_partial, int* map_offsets, int n_measurements,
// REAL* esf, REAL* esfd )
//{
// REAL* lambda = (REAL*)shmem ;
// REAL* esf_shared = (REAL*)&lambda[n_measurements] ;
// // determine indexing offsets
// int tid = threadIdx.x ;
// int map_idx = blockIdx.x ;
// int block_offset = n_measurements*map_offsets[map_idx] ;
// int n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
// // compute log lambda
// lambda[tid] = 0 ;
// int idx = block_offset + tid ;
// REAL max_val = -FLT_MAX ;
// for ( int j = 0 ; j < n_features ; j++)
// {
// REAL tmp = w_partial[idx] ;
// REAL tmp_max = fmax(tmp,max_val) ;
// lambda[tid] = exp( max_val - tmp_max )*lambda[tid]
// + exp( tmp - tmp_max ) ;
// max_val = tmp_max ;
// idx += n_measurements ;
// }
// lambda[tid] = safeLog(lambda[tid]) + max_val
// + safeLog(dev_config.clutterRate)
// - safeLog(dev_config.clutterDensity) ;
// __syncthreads() ;
// // compute full esf using recursive algorithm
// esf_shared[tid+1] = 0 ;
// int esf_offset = map_idx*(n_measurements+1) ;
// if ( tid == 0 )
// {
// esf_shared[0] = 1 ;
// esf[esf_offset] = 0 ;
// }
// __syncthreads() ;
// for ( int m = 0 ; m < n_measurements ; m++ )
// {
// REAL tmp1 = esf_shared[tid+1] ;
// REAL tmp2 = esf_shared[tid] ;
// __syncthreads() ;
// if ( tid < m+1 )
// {
//// REAL tmp_sum ;
//// max_val = fmax(tmp1, lambda[m]+tmp2) ;
//// tmp_sum = exp(tmp1-max_val) + exp(lambda[m]+tmp2-max_val) ;
//// esf_shared[tid+1] = safeLog( fabs(tmp_sum) ) + max_val ;
// esf_shared[tid+1] = tmp1 - exp(lambda[m])*tmp2 ;
// }
// __syncthreads() ;
// }
// esf[esf_offset+tid+1] = log(fabs(esf_shared[tid+1])) ;
// // compute esf's for detection terms
// for ( int m = 0 ; m < n_measurements ; m++ )
// {
// int esfd_offset = n_measurements*n_measurements*map_idx + m*n_measurements ;
//// esf_shared[tid+1] = LOG0 ;
// esf_shared[tid+1] = 0 ;
// if ( tid == 0 )
// {
//// esf_shared[0] = 0 ;
//// esfd[esfd_offset] = 0 ;
// esf_shared[0] = 1 ;
// esfd[esfd_offset] = 0 ;
// }
// __syncthreads() ;
// int k = 0 ;
// for ( int n = 0 ; n < n_measurements ; n++ )
// {
// REAL tmp1 = esf_shared[tid+1] ;
// REAL tmp2 = esf_shared[tid] ;
// __syncthreads() ;
// if ( n != m )
// {
// if ( tid < k+1 )
// {
//// REAL tmp_sum ;
//// max_val = fmax(tmp1,lambda[n]+tmp2) ;
//// tmp_sum = exp(tmp1-max_val) - exp(lambda[n]+tmp2-max_val) ;
//// esf_shared[tid+1] = safeLog( fabs(tmp_sum) ) + max_val ;
// esf_shared[tid+1] = tmp1 - exp(lambda[n])*tmp2 ;
// }
// k++ ;
// }
// __syncthreads() ;
// }
// if ( tid < (n_measurements-1) )
// esfd[esfd_offset+tid+1] = log(fabs(esf_shared[tid+1])) ;
// }
//}
///// compute the multi-object likelihoods for the CPHD update
///**
// This kernel computes the terms denoted as Psi in Vo's Analytic CPHD paper, and
// their inner products with the predicted cardinality distribution. It also
// produces the updated cardinality
// */
//__global__ void
//computePsiKernel( Gaussian2D* features_predict, REAL* cn_predict, REAL* esf,
// REAL* esfd, int* map_offsets,
// int n_measurements, REAL* qdw, REAL* dev_factorial,
// REAL* dev_C, REAL* dev_cn_clutter, REAL* cn_update,
// REAL* innerprod_psi0, REAL* innerprod_psi1,
// REAL* innerprod_psi1d )
//{
// int n = threadIdx.x ;
// REAL psi0 = 0 ;
// REAL psi1 = 0 ;
// int map_idx = blockIdx.x ;
// int cn_offset = (dev_config.maxCardinality+1)*map_idx ;
// int esf_offset = (n_measurements+1)*map_idx ;
// int stop_idx = 0 ;
// REAL max_val0 = 0 ;
// REAL max_val1 = 0 ;
// REAL* shdata = (REAL*)shmem ;
// // compute the (log) inner product < q_D, w >
// int map_offset = map_offsets[map_idx] ;
// int n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
// REAL innerprod_qdw = 0 ;
// max_val0 = qdw[map_offset] ;
// for ( int j = 0 ; j < n_features ; j+=blockDim.x )
// {
// REAL val = -FLT_MAX ;
// if ( j+n < n_features )
// val = qdw[map_offset+j+n] ;
// maxByReduction(shdata,val,n) ;
// max_val0 = fmax(max_val0,shdata[0]) ;
// __syncthreads() ;
// }
// for ( int j = 0 ; j < n_features ; j+= blockDim.x )
// {
// REAL val = 0 ;
// if ( (j+n) < n_features )
// val = exp(qdw[map_offset+j+n]-max_val0) ;
// sumByReduction( shdata, val, n ) ;
// innerprod_qdw += shdata[0] ;
// __syncthreads() ;
// }
// innerprod_qdw = safeLog(innerprod_qdw) + max_val0 ;
// // compute the (log) inner product < 1, w >
// REAL wsum = 0 ;
// for ( int j = 0 ; j < n_features ; j += blockDim.x )
// {
// REAL val = 0 ;
// if ( (j+n) < n_features )
// val = features_predict[map_offset+j+n].weight ;
// sumByReduction( shdata, val, n );
// wsum += shdata[0] ;
// __syncthreads() ;
// }
// wsum = safeLog(wsum) ;
// // compute (log) PSI0(n) and PSI1(n), using log-sum-exp
// max_val0 = -FLT_MAX ;
// max_val1 = -FLT_MAX ;
// stop_idx = min(n,n_measurements) ;
// for ( int j = 0 ; j <= stop_idx ; j++ )
// {
// // PSI0
// REAL p_coeff = dev_C[n+j*(dev_config.maxCardinality+1)]
// + dev_factorial[j] ;
// REAL aux = dev_factorial[n_measurements-j]
// + dev_cn_clutter[n_measurements-j] + esf[esf_offset+ j]
// - n*wsum ;
// REAL tmp = aux + p_coeff + (n-j)*innerprod_qdw ;
// psi0 = exp(max_val0-fmax(max_val0,tmp))*psi0
// + exp(tmp - fmax(max_val0,tmp) ) ;
// max_val0 = fmax(max_val0,tmp) ;
// // PSI1
// p_coeff = dev_C[n+(j+1)*(dev_config.maxCardinality+1)]
// + dev_factorial[j+1] ;
// tmp = aux + p_coeff + (n-(j+1))*innerprod_qdw ;
// psi1 = exp(max_val1-fmax(max_val1,tmp))*psi1
// + exp(tmp - fmax(max_val1,tmp) ) ;
// max_val1 = fmax(max_val1,tmp) ;
// }
// psi0 = safeLog(psi0) + max_val0 ;
// psi1 = safeLog(psi1) + max_val1 ;
// // (log) inner product of PSI0 and predicted cardinality distribution, using
// // log-sum-exp trick
// REAL val = psi0 + cn_predict[cn_offset+n] ;
// maxByReduction( shdata, val, n ) ;
// max_val0 = shdata[0] ;
// __syncthreads() ;
// sumByReduction( shdata, exp(val-max_val0), n ) ;
// if ( n==0 )
// innerprod_psi0[map_idx] = safeLog(shdata[0]) + max_val0 ;
// // (log) inner product of PSI1 and predicted cardinality distribution, using
// // log-sum-exp trick
// val = psi1 + cn_predict[cn_offset+n] ;
// maxByReduction( shdata, psi1+cn_predict[cn_offset+n], n ) ;
//// shdata[n] = psi1+cn_predict[cn_offset+n] ;
// max_val1 = shdata[0] ;
// __syncthreads() ;
// sumByReduction( shdata, exp( val - max_val1 ), n ) ;
// if ( n == 0 )
// innerprod_psi1[map_idx] = safeLog(shdata[0]) + max_val1 ;
//// __syncthreads() ;
// // PSI1 detection terms
// stop_idx = min(n_measurements - 1, n) ;
// for ( int m = 0 ; m < n_measurements ; m++ )
// {
// int esfd_offset = map_idx * n_measurements * n_measurements
// + m*n_measurements ;
// REAL psi1d = 0 ;
// max_val1 = -FLT_MAX ;
// for ( int j = 0 ; j <= stop_idx ; j++ )
// {
// REAL p_coeff = dev_C[n+(j+1)*(dev_config.maxCardinality+1)]
// + dev_factorial[j+1] ;
// REAL aux = dev_factorial[n_measurements-1-j]
// + dev_cn_clutter[n_measurements-1-j] + esfd[esfd_offset+ j]
// - n*wsum ;
// REAL tmp = aux + p_coeff + (n-(j+1))*innerprod_qdw ;
// psi1d = exp(max_val1-fmax(max_val1,tmp))*psi1d
// + exp(tmp - fmax(max_val1,tmp) ) ;
// max_val1 = fmax(max_val1,tmp) ;
// }
// psi1d = safeLog(psi1d) + max_val1 ;
// val = psi1d + cn_predict[cn_offset+n] ;
// maxByReduction( shdata, val, n ) ;
// max_val1 = shdata[0] ;
// __syncthreads() ;
// sumByReduction( shdata, exp(val-max_val1), n ) ;
// if ( n == 0 )
// innerprod_psi1d[map_idx*n_measurements+m] = safeLog(shdata[0]) + max_val1 ;
// __syncthreads() ;
// }
// // compute log updated cardinality
// cn_update[cn_offset+n] = cn_predict[cn_offset+n] + psi0
// - innerprod_psi0[map_idx] ;
//}
///// perform the gaussian mixture CPHD weight update
///**
// This kernel takes the results produced by the previous three kernels in the
// CPHD pipeline (PreUpdate, ComputeEsf, and ComputePsi) and applies them to
// update the weights of the Gaussian Mixture as in Vo's paper
// Kernel organization: One thread block per particle. Each thread updates all
// the features for one measurement.
// */
//__global__ void
//cphdUpdateKernel( int* map_offsets, int n_measurements,
// REAL* innerprod_psi0, REAL* innerprod_psi1,
// REAL* innerprod_psi1d, bool* merged_flags,
// Gaussian2D* updated_features )
//{
// int z_idx = threadIdx.x ;
// int map_idx = blockIdx.x ;
// int offset = (n_measurements+1)*map_offsets[map_idx] ;
// int n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
// // detection update
// REAL psi1d = innerprod_psi1d[n_measurements*map_idx+z_idx] ;
// for ( int j = 0 ; j < n_features ; j++ )
// {
// REAL tmp = updated_features[offset+z_idx].weight
// + psi1d - innerprod_psi0[map_idx] + safeLog(dev_config.clutterRate)
// - safeLog(dev_config.clutterDensity) ;
// updated_features[offset+z_idx].weight = exp(tmp) ;
// if ( exp(tmp) >= dev_config.minFeatureWeight )
// merged_flags[offset + z_idx] = false ;
// else
// merged_flags[offset + z_idx] = true ;
// offset += n_measurements ;
// }
// // non-detection updates
// for ( int j = 0 ; j < n_features ; j += blockDim.x )
// {
// if ( j+z_idx < n_features )
// {
// int nondetect_idx = offset + j + z_idx ;
// REAL tmp = safeLog(updated_features[nondetect_idx].weight)
// + innerprod_psi1[map_idx] - innerprod_psi0[map_idx]
// + safeLog(1-dev_config.pd) ;
// updated_features[nondetect_idx].weight = exp(tmp) ;
// if ( exp(tmp) >= dev_config.minFeatureWeight )
// merged_flags[nondetect_idx] = false ;
// else
// merged_flags[nondetect_idx] = true ;
// }
// }
//}
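/// EKF pre-update for static (2D, position-only) features: for every
/// predicted feature, compute the detection probability, innovation
/// covariance, Kalman gain and updated covariance, then write one partially
/// updated Gaussian (with log-transformed weight) per measurement into
/// features_preupdate.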
__global__ void
preUpdateSynthKernel(ConstantVelocityState* poses,
int* pose_indices,
Gaussian2D* features_predict,
REAL* features_pd,
int n_features, int n_measure,
Gaussian2D* features_preupdate){
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
for ( int i = tid ; i < n_features ; i+= gridDim.x*blockDim.x){
// get vehicle pose
ConstantVelocityState pose = poses[pose_indices[i]] ;
// get predicted feature
Gaussian2D feature_predict = features_predict[i] ;
// predicted measurement
REAL dx = feature_predict.mean[0] - pose.px ;
REAL dy = feature_predict.mean[1] - pose.py ;
REAL r2 = dx*dx + dy*dy ;
REAL r = sqrt(r2) ;
REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// probability of detection
REAL feature_pd = 0 ;
if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
feature_pd = dev_config.pd ;
features_pd[i] = feature_pd ;
// measurement jacobian wrt feature
REAL J[4] ;
J[0] = dx/r ;
J[2] = dy/r ;
J[1] = -dy/r2 ;
J[3] = dx/r2 ;
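// J is stored column-major: J[0] = dr/dx, J[1] = dbearing/dx,
// J[2] = dr/dy, J[3] = dbearing/dy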
REAL* P = feature_predict.cov ;
// BEGIN Maple-Generated expressions
// innovation covariance
REAL sigma[4] ;
sigma[0] = (P[0] * J[0] + J[2] * P[1]) * J[0] + (J[0] * P[2] + P[3] * J[2]) * J[2] + pow(dev_config.stdRange,2) ;
sigma[1] = (P[0] * J[1] + J[3] * P[1]) * J[0] + (J[1] * P[2] + P[3] * J[3]) * J[2];
sigma[2] = (P[0] * J[0] + J[2] * P[1]) * J[1] + (J[0] * P[2] + P[3] * J[2]) * J[3];
sigma[3] = (P[0] * J[1] + J[3] * P[1]) * J[1] + (J[1] * P[2] + P[3] * J[3]) * J[3] + pow(dev_config.stdBearing,2) ;
// enforce symmetry
sigma[1] = (sigma[1]+sigma[2])/2 ;
sigma[2] = sigma[1] ;
REAL det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
// inverse of sigma
REAL S[4] ;
S[0] = sigma[3]/(det_sigma) ;
S[1] = -sigma[1]/(det_sigma) ;
S[2] = -sigma[2]/(det_sigma) ;
S[3] = sigma[0]/(det_sigma) ;
// Kalman gain
REAL K[4] ;
K[0] = S[0]*(P[0]*J[0] + P[2]*J[2]) + S[1]*(P[0]*J[1] + P[2]*J[3]) ;
K[1] = S[0]*(P[1]*J[0] + P[3]*J[2]) + S[1]*(P[1]*J[1] + P[3]*J[3]) ;
K[2] = S[2]*(P[0]*J[0] + P[2]*J[2]) + S[3]*(P[0]*J[1] + P[2]*J[3]) ;
K[3] = S[2]*(P[1]*J[0] + P[3]*J[2]) + S[3]*(P[1]*J[1] + P[3]*J[3]) ;
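// covariance update in Joseph form, (I-KJ)*P*(I-KJ)' + K*R*K',
// which keeps the result symmetric positive semi-definite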
REAL cov_update[4] ;
cov_update[0] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + pow(K[0], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[2], 2) * dev_config.stdBearing*dev_config.stdBearing;
cov_update[2] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
cov_update[1] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
cov_update[3] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + pow(K[1], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[3], 2) * dev_config.stdBearing*dev_config.stdBearing;
REAL innov[2] ;
REAL dist = 0 ;
for ( int m = 0 ; m < n_measure ; m++ )
{
int idx = m*n_features + i ;
innov[0] = Z[m].range - r ;
innov[1] = wrapAngle(Z[m].bearing - bearing) ;
features_preupdate[idx].mean[0] = feature_predict.mean[0] + K[0]*innov[0] + K[2]*innov[1] ;
features_preupdate[idx].mean[1] = feature_predict.mean[1] + K[1]*innov[0] + K[3]*innov[1] ;
for ( int n = 0 ; n < 4 ; n++ )
features_preupdate[idx].cov[n] = cov_update[n] ;
// compute single object likelihood
dist = innov[0]*innov[0]*S[0] +
innov[0]*innov[1]*(S[1] + S[2]) +
innov[1]*innov[1]*S[3] ;
if(Z[m].label==STATIC_MEASUREMENT || !dev_config.labeledMeasurements)
{
// partially update weight (log-transformed)
features_preupdate[idx].weight = safeLog(feature_pd)
+ safeLog(feature_predict.weight)
- 0.5*dist
- safeLog(2*M_PI)
- 0.5*safeLog(det_sigma) ;
}
else
{
features_preupdate[idx].weight = safeLog(0) ;
}
}
}
}
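/// EKF pre-update for dynamic (4D, position + velocity) features: same
/// structure as the 2D overload above, except that the measurement Jacobian
/// only involves the position components and the covariance update is the
/// full 4x4 Joseph form.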
__global__ void
preUpdateSynthKernel(ConstantVelocityState* poses,
int* pose_indices,
Gaussian4D* features_predict,
REAL* features_pd,
int n_features, int n_measure,
Gaussian4D* features_preupdate){
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
for ( int i = tid ; i < n_features ; i+= gridDim.x*blockDim.x){
// get vehicle pose
ConstantVelocityState pose = poses[pose_indices[i]] ;
// get predicted feature
Gaussian4D feature_predict = features_predict[i] ;
// predicted measurement
REAL dx = feature_predict.mean[0] - pose.px ;
REAL dy = feature_predict.mean[1] - pose.py ;
REAL r2 = dx*dx + dy*dy ;
REAL r = sqrt(r2) ;
REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// probability of detection
REAL feature_pd = 0 ;
if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
feature_pd = dev_config.pd ;
features_pd[i] = feature_pd ;
// measurement jacobian wrt feature
REAL J[4] ;
J[0] = dx/r ;
J[2] = dy/r ;
J[1] = -dy/r2 ;
J[3] = dx/r2 ;
REAL* P = feature_predict.cov ;
// BEGIN Maple-Generated expressions
// innovation covariance
REAL sigma[4] ;
REAL var_range = pow(dev_config.stdRange,2) ;
REAL var_bearing = pow(dev_config.stdBearing,2) ;
sigma[0] = J[0] * (P[0] * J[0] + P[4] * J[2]) + J[2] * (P[1] * J[0] + P[5] * J[2]) + var_range;
sigma[1] = J[1] * (P[0] * J[0] + P[4] * J[2]) + J[3] * (P[1] * J[0] + P[5] * J[2]);
sigma[2] = J[0] * (P[0] * J[1] + P[4] * J[3]) + J[2] * (P[1] * J[1] + P[5] * J[3]);
sigma[3] = J[1] * (P[0] * J[1] + P[4] * J[3]) + J[3] * (P[1] * J[1] + P[5] * J[3]) + var_bearing;
// enforce symmetry
sigma[1] = (sigma[1]+sigma[2])/2 ;
sigma[2] = sigma[1] ;
// makePositiveDefinite(sigma) ;
REAL det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
REAL S[4] ;
S[0] = sigma[3]/(det_sigma) ;
S[1] = -sigma[1]/(det_sigma) ;
S[2] = -sigma[2]/(det_sigma) ;
S[3] = sigma[0]/(det_sigma) ;
// Kalman gain
REAL K[8] ;
K[0] = P[0] * (J[0] * S[0] + J[1] * S[1])
+ P[4] * (J[2] * S[0] + J[3] * S[1]);
K[1] = P[1] * (J[0] * S[0] + J[1] * S[1])
+ P[5] * (J[2] * S[0] + J[3] * S[1]);
K[2] = P[2] * (J[0] * S[0] + J[1] * S[1])
+ P[6] * (J[2] * S[0] + J[3] * S[1]);
K[3] = P[3] * (J[0] * S[0] + J[1] * S[1])
+ P[7] * (J[2] * S[0] + J[3] * S[1]);
K[4] = P[0] * (J[0] * S[2] + J[1] * S[3])
+ P[4] * (J[2] * S[2] + J[3] * S[3]);
K[5] = P[1] * (J[0] * S[2] + J[1] * S[3])
+ P[5] * (J[2] * S[2] + J[3] * S[3]);
K[6] = P[2] * (J[0] * S[2] + J[1] * S[3])
+ P[6] * (J[2] * S[2] + J[3] * S[3]);
K[7] = P[3] * (J[0] * S[2] + J[1] * S[3])
+ P[7] * (J[2] * S[2] + J[3] * S[3]);
// Updated covariance (Joseph Form)
REAL cov_update[16] ;
cov_update[0] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + var_range * pow( K[0], 2) + var_bearing * pow( K[4], 2);
cov_update[1] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
cov_update[2] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[2] * (1 - K[0] * J[0] - K[4] * J[1]) + P[6] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
cov_update[3] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[3] * (1 - K[0] * J[0] - K[4] * J[1]) + P[7] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
cov_update[4] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
cov_update[5] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + var_range * pow( K[1], 2) + var_bearing * pow( K[5], 2);
cov_update[6] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[2] * (-K[1] * J[0] - K[5] * J[1]) + P[6] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
cov_update[7] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[3] * (-K[1] * J[0] - K[5] * J[1]) + P[7] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
cov_update[8] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
cov_update[9] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
cov_update[10] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[2] * (-K[2] * J[0] - K[6] * J[1]) + P[6] * (-K[2] * J[2] - K[6] * J[3]) + P[10] + var_range * pow( K[2], 2) + var_bearing * pow( K[6], 2);
cov_update[11] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[3] * (-K[2] * J[0] - K[6] * J[1]) + P[7] * (-K[2] * J[2] - K[6] * J[3]) + P[11] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
cov_update[12] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
cov_update[13] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
cov_update[14] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[2] * (-K[3] * J[0] - K[7] * J[1]) + P[6] * (-K[3] * J[2] - K[7] * J[3]) + P[14] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
cov_update[15] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[3] * (-K[3] * J[0] - K[7] * J[1]) + P[7] * (-K[3] * J[2] - K[7] * J[3]) + P[15] + var_range * pow( K[3], 2) + var_bearing * pow( K[7], 2);
REAL innov[2] ;
REAL dist = 0 ;
for ( int m = 0 ; m < n_measure ; m++ )
{
int idx = m*n_features+i ;
innov[0] = Z[m].range - r ;
innov[1] = wrapAngle(Z[m].bearing - bearing) ;
features_preupdate[idx].mean[0] = feature_predict.mean[0] + K[0]*innov[0] + K[4]*innov[1] ;
features_preupdate[idx].mean[1] = feature_predict.mean[1] + K[1]*innov[0] + K[5]*innov[1] ;
features_preupdate[idx].mean[2] = feature_predict.mean[2] + K[2]*innov[0] + K[6]*innov[1] ;
features_preupdate[idx].mean[3] = feature_predict.mean[3] + K[3]*innov[0] + K[7]*innov[1] ;
for ( int n = 0 ; n < 16 ; n++ )
features_preupdate[idx].cov[n] = cov_update[n] ;
// compute single object likelihood
dist = innov[0]*innov[0]*S[0] +
innov[0]*innov[1]*(S[1] + S[2]) +
innov[1]*innov[1]*S[3] ;
if(Z[m].label==DYNAMIC_MEASUREMENT || !dev_config.labeledMeasurements)
{
// partially update weight (log-transformed)
features_preupdate[idx].weight = safeLog(feature_pd)
+ safeLog(feature_predict.weight)
- 0.5*dist
- safeLog(2*M_PI)
- 0.5*safeLog(det_sigma) ;
}
else
{
features_preupdate[idx].weight = safeLog(0) ;
}
}
}
}
/// perform the gaussian mixture PHD update
/**
PHD update algorithm as in Vo & Ma 2006.
\param features_predict Array of predicted (in-range) Gaussians from all
particles, concatenated together
\param featurePd Detection probability computed for each predicted feature
\param features_preupdate Partially updated Gaussians produced by the
pre-update kernel, one per predicted feature per measurement
\param features_birth Birth Gaussians, one per measurement for each particle
\param map_offsets Integer array of indexing offsets marking each particle's
boundaries in features_predict
\param n_particles Number of particles
\param n_measure Number of measurements
\param features_update Stores the updated Gaussians computed by the kernel
\param merge_flags Array of booleans used by the merging kernel to keep
track of which features have already been merged; features whose updated
weight falls below the minimum feature weight are pre-flagged here
\param particle_weights New (log) particle weights after the PHD update
*/
template <class GaussianType>
__global__ void
phdUpdateKernel(GaussianType* features_predict,
REAL* featurePd,
GaussianType* features_preupdate,
GaussianType* features_birth,
int* map_offsets,
int n_particles, int n_measure,
GaussianType* features_update,
bool* merge_flags,
REAL* particle_weights)
{
// shared memory variables
__shared__ REAL sdata[256] ;
REAL particle_weight = 0 ;
REAL cardinality_predict = 0 ;
int update_offset = 0 ;
int n_features = 0 ;
int n_update = 0 ;
int predict_offset = 0 ;
int preupdate_offset = 0 ;
int birth_offset = 0 ;
// initialize variables
int tid = threadIdx.x ;
// pre-update variables
GaussianType feature ;
// update variables
int preupdate_stride = map_offsets[n_particles] ;
REAL w_partial = 0 ;
int updateIdx = 0 ;
// loop over particles
for ( int map_idx = blockIdx.x ; map_idx < n_particles ; map_idx += gridDim.x )
{
// initialize map-specific variables
predict_offset = map_offsets[map_idx] ;
update_offset = predict_offset*(n_measure+1) +
map_idx*n_measure ;
preupdate_offset = predict_offset ;
n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
n_update = (n_features)*(n_measure+1) + n_measure ;
particle_weight = 0 ;
cardinality_predict = 0.0 ;
birth_offset = map_idx*n_measure ;
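// layout of the updated features for this particle, starting at
// update_offset:
// [ n_features non-detection terms |
//   n_features detection terms for each of the n_measure measurements |
//   n_measure birth terms ]
// e.g. n_features = 3, n_measure = 2 gives 3 + 3*2 + 2 = 11 = n_update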
// loop over predicted features + newborn features
for ( int j = 0 ; j < (n_features+n_measure) ; j += blockDim.x )
{
int feature_idx = j + tid ;
w_partial = 0 ;
if ( feature_idx < n_features )
{
// persistent feature
feature = features_predict[predict_offset+feature_idx] ;
REAL pd = featurePd[predict_offset+feature_idx] ;
// save non-detection term
int idx_nondetect = update_offset
+ feature_idx ;
copy_gaussians(feature,features_update[idx_nondetect]) ;
features_update[idx_nondetect].weight *= (1-pd) ;
// save the detection terms
for (int m = 0 ; m < n_measure ; m++){
int preupdate_idx = m*preupdate_stride +
preupdate_offset + feature_idx ;
int update_idx = update_offset + n_features +
m*n_features + feature_idx ;
copy_gaussians(features_preupdate[preupdate_idx],
features_update[update_idx]) ;
}
w_partial = pd*feature.weight ;
}
else if (feature_idx < n_features+n_measure)
{
// newborn feature
// find measurement corresponding to current thread
int z_idx = feature_idx - n_features ;
int idx_birth = update_offset + n_features
+ n_measure*n_features + z_idx ;
copy_gaussians(features_birth[birth_offset+z_idx],
features_update[idx_birth]) ;
w_partial = dev_config.birthWeight ;
}
else
{
// thread does not correspond to a feature
w_partial = 0 ;
}
// compute predicted cardinality
sumByReduction(sdata, w_partial, tid) ;
cardinality_predict += sdata[0] ;
__syncthreads() ;
}
// cuPrintf("particle_weight=%f\n",particle_weight) ;
// compute the weight normalizers
for ( int i = 0 ; i < n_measure ; i++ )
{
REAL log_normalizer = 0 ;
REAL val = 0 ;
REAL sum = 0 ;
GaussianType* ptr_update = features_update
+ update_offset + n_features + i*n_features ;
// cuPrintf("%f\n",features_update[0].weight) ;
if (n_features > 0)
{
// find the maximum from all the log partial weights
for ( int j = 0 ; j < (n_features) ; j += blockDim.x )
{
int feature_idx = j + tid ;
if ( feature_idx < n_features )
val = exp(ptr_update[feature_idx].weight) ;
else
val = 0 ;
sumByReduction(sdata,val,tid);
sum += sdata[0] ;
}
// add clutter density and birth weight
sum += dev_config.clutterDensity ;
sum += dev_config.birthWeight ;
// put normalizer in log form
log_normalizer = safeLog(sum) ;
}
else
{
sum = dev_config.clutterDensity + dev_config.birthWeight ;
log_normalizer = safeLog(sum) ;
}
// compute final feature weights
for ( int j = 0 ; j < (n_features+1) ; j += blockDim.x )
{
int feature_idx = j + tid ;
if ( feature_idx <= n_features)
{
if ( feature_idx < n_features )
{
// update detection weight
updateIdx = feature_idx ;
}
else if ( feature_idx == n_features )
{
// update birth term weight
updateIdx = (n_measure-i)*n_features + i ;
// cuPrintf("%d\n",updateIdx) ;
}
ptr_update[updateIdx].weight =
exp(ptr_update[updateIdx].weight-log_normalizer) ;
}
}
// update the pose particle weights
if ( tid == 0 )
{
// cuPrintf("normalizer = %f\n",log_normalizer) ;
particle_weight += log_normalizer ;
}
}
// Particle weighting
__syncthreads() ;
if ( tid == 0 )
{
if (dev_config.particleWeighting==0){
particle_weight -= cardinality_predict ;
particle_weights[map_idx] = particle_weight ;
}
else if (dev_config.particleWeighting==1){
// compute predicted cardinality
float cn_predict = 0 ;
for ( int i = 0 ; i < n_features ; i++ ){
cn_predict += features_predict[predict_offset+i].weight ;
}
// compute updated cardinality
float cn_update = 0 ;
for ( int i = 0 ; i < n_features*(n_measure+1) + n_measure ; i++){
cn_update += features_update[update_offset+i].weight ;
}
particle_weights[map_idx] = n_measure*dev_config.clutterDensity
+ cn_update - cn_predict
- dev_config.clutterRate ;
}
}
// set the merging flags
for ( int j = 0 ; j < n_update ; j+=blockDim.x)
{
int feature_idx = j+tid ;
if (feature_idx < n_update)
{
int idx = update_offset+feature_idx ;
if (features_update[idx].weight<dev_config.minFeatureWeight)
merge_flags[idx] = true ;
else
merge_flags[idx] = false;
}
}
}
}
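/// PHD update for the mixed feature model: static (2D) and dynamic (4D)
/// features are updated in one kernel so that the per-measurement weight
/// normalizers and the particle weight update are computed over both
/// feature sets together. Thread blocks stride over the particles, handling
/// one particle at a time.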
__global__ void
phdUpdateKernelMixed(ConstantVelocityState* poses,
Gaussian2D* features_predict_static,
Gaussian4D* features_predict_dynamic,
int* map_offsets_static, int* map_offsets_dynamic,
int n_particles, int n_measure,
Gaussian2D* features_update_static,
Gaussian4D* features_update_dynamic,
bool* merge_flags_static, bool* merge_flags_dynamic,
REAL* particle_weights)
{
__shared__ REAL sdata[256] ;
int tid = threadIdx.x ;
int map_idx = 0 ;
int feature_idx = 0 ;
ConstantVelocityState pose ;
int n_features_static = 0 ;
int n_features_dynamic = 0 ;
int predict_offset_static = 0 ;
int predict_offset_dynamic = 0 ;
int update_offset_static = 0 ;
int update_offset_dynamic = 0 ;
int n_update_static = 0 ;
int n_update_dynamic = 0 ;
REAL cardinality_predict = 0 ;
REAL particle_weight = 0 ;
// loop over particles
for ( int p = 0 ; p < n_particles ; p += gridDim.x )
{
map_idx = p + blockIdx.x ;
if ( map_idx < n_particles )
{
// compute offsets for the current map
n_features_static = map_offsets_static[map_idx+1]
- map_offsets_static[map_idx] ;
n_features_dynamic = map_offsets_dynamic[map_idx+1]
- map_offsets_dynamic[map_idx] ;
predict_offset_static = map_offsets_static[map_idx] ;
predict_offset_dynamic = map_offsets_dynamic[map_idx] ;
update_offset_static = predict_offset_static
+ n_measure*predict_offset_static
+ map_idx*n_measure ;
update_offset_dynamic = predict_offset_dynamic
+ n_measure*predict_offset_dynamic
+ map_idx*n_measure ;
n_update_static = n_features_static
+ n_measure*n_features_static
+ n_measure ;
n_update_dynamic = n_features_dynamic
+ n_measure*n_features_dynamic
+ n_measure ;
// get the corresponding vehicle pose
pose = poses[map_idx] ;
__syncthreads() ;
// reinitialize predicted cardinality
cardinality_predict = 0 ;
// initialize log(particle_weight) update to 1
particle_weight = 0 ;
for ( int j = 0 ; j < (n_features_static+n_measure+n_features_dynamic+n_measure) ; j += blockDim.x )
{
feature_idx = j + tid ;
// Distribution of features to threads:
// [persistent_static | birth_static | persistent_dynamic | birth_dynamic ]
REAL feature_pd = 0 ;
REAL val = 0 ;
bool is_static = (feature_idx < n_features_static+n_measure) ;
bool is_dynamic = (feature_idx < n_features_static+n_measure+n_features_dynamic+n_measure)
&& !is_static ;
if ( is_static)
{
Gaussian2D* ptr_update = NULL ;
Gaussian2D* ptr_nondetect = NULL ;
if(feature_idx < n_features_static)
{
ptr_nondetect = features_update_static
+ update_offset_static + feature_idx ;
ptr_update = ptr_nondetect + n_features_static ;
computePreUpdate( pose, features_predict_static[predict_offset_static+feature_idx],
n_features_static, n_measure, feature_pd,
*ptr_nondetect, ptr_update ) ;
val = feature_pd
*features_predict_static[feature_idx].weight ;
}
else if (feature_idx < n_features_static+n_measure)
{
int z_idx = feature_idx - n_features_static ;
ptr_update = features_update_static + update_offset_static
+ n_features_static
+ n_measure*n_features_static + z_idx ;
computeBirth(pose, Z[z_idx],*ptr_update) ;
}
}
else if(is_dynamic)
{
int feature_idx_dynamic = feature_idx
- n_features_static - n_measure ;
Gaussian4D* ptr_update = NULL ;
Gaussian4D* ptr_nondetect = NULL ;
if(feature_idx_dynamic < n_features_dynamic)
{
ptr_nondetect = features_update_dynamic
+ update_offset_dynamic + feature_idx_dynamic ;
ptr_update = ptr_nondetect + n_features_dynamic ;
computePreUpdate( pose, features_predict_dynamic[predict_offset_dynamic+feature_idx_dynamic],
n_features_dynamic, n_measure, feature_pd,
*ptr_nondetect,ptr_update ) ;
val = feature_pd
*features_predict_dynamic[feature_idx_dynamic].weight ;
}
else if(feature_idx_dynamic < n_features_dynamic+n_measure )
{
int z_idx = feature_idx_dynamic - n_features_dynamic ;
ptr_update = features_update_dynamic + update_offset_dynamic
+ n_features_dynamic
+ n_features_dynamic*n_measure + z_idx ;
computeBirth(pose, Z[z_idx],*ptr_update) ;
// cuPrintf("Dynamic birth weight: %f\n",ptr_update->weight) ;
val = 0 ;
}
}
else
{
// not a valid feature index
val = 0 ;
}
// compute predicted cardinality
sumByReduction( sdata, val, tid );
cardinality_predict += sdata[0] ;
__syncthreads() ;
}
// finish updating weights - loop over measurements
for ( int m = 0 ; m < n_measure ; m++ )
{
// pointers offset to updated features corresponding to current
// measurement
Gaussian2D* ptr_static = features_update_static
+ update_offset_static
+ n_features_static
+ m*(n_features_static) ;
Gaussian4D* ptr_dynamic = features_update_dynamic
+ update_offset_dynamic
+ n_features_dynamic
+ m*(n_features_dynamic) ;
REAL normalizer = 0 ;
// normalizer is the sum of partially updated weights
// corresponding to current measurement.
for ( int j = 0 ; j < n_features_static+n_features_dynamic ; j += blockDim.x )
{
feature_idx = j + tid ;
// REAL val = -FLT_MAX ;
REAL val = 0 ;
bool is_static = feature_idx < n_features_static ;
bool is_dynamic = (feature_idx < n_features_static+n_features_dynamic)
&& !is_static ;
if ( is_static )
val = exp(ptr_static[feature_idx].weight) ;
else if(is_dynamic)
val = exp(ptr_dynamic[feature_idx-n_features_static].weight) ;
sumByReduction(sdata,val,tid);
normalizer += sdata[0] ;
}
normalizer += dev_config.clutterDensity
+ dev_config.birthWeight ;
// we get 2 birth terms when measurements are unlabeled
if ( !dev_config.labeledMeasurements )
normalizer += dev_config.birthWeight ;
normalizer = safeLog(normalizer) ;
// loop through features corresponding to current measurement,
// and divide by normalizer.
for ( int j = 0 ; j < n_features_static+1+n_features_dynamic+1 ; j+=blockDim.x )
{
feature_idx = j+tid ;
int idx_update = - 1 ;
bool is_static = (feature_idx < n_features_static+1) ;
bool is_dynamic = (feature_idx<(n_features_static+1+n_features_dynamic+1))
&& !is_static ;
if ( is_static)
{
int idx_update = -1 ;
if(feature_idx < n_features_static)
{
idx_update = feature_idx ;
}
else if (feature_idx == n_features_static)
{
idx_update = (n_measure-m)*n_features_static + m ;
}
ptr_static[idx_update].weight =
exp(ptr_static[idx_update].weight - normalizer) ;
}
else if(is_dynamic)
{
int feature_idx_dynamic = feature_idx
- n_features_static - 1 ;
if(feature_idx_dynamic < n_features_dynamic)
{
idx_update = feature_idx_dynamic ;
}
else if(feature_idx_dynamic==n_features_dynamic)
{
idx_update = (n_measure-m)*n_features_dynamic + m ;
}
ptr_dynamic[idx_update].weight =
exp(ptr_dynamic[idx_update].weight - normalizer) ;
}
}
// multiply particle weight update by normalizer
__syncthreads() ;
particle_weight += normalizer ;
}
// finish updating particle weight
particle_weight -= cardinality_predict ;
if ( tid == 0){
if (dev_config.particleWeighting==0){
particle_weights[map_idx] = particle_weight ;
}
else if (dev_config.particleWeighting == 1){
// compute predicted cardinality
float cn_predict = 0 ;
for ( int i = 0 ; i < n_features_static ; i++ ){
cn_predict +=
features_predict_static[predict_offset_static+i].weight ;
}
for ( int i = 0 ; i < n_features_dynamic ; i++ ){
cn_predict +=
features_predict_dynamic[predict_offset_dynamic+i].weight ;
}
cn_predict += n_measure*dev_config.birthWeight ;
// compute updated cardinality
float cn_update = 0 ;
for ( int i = 0 ; i < n_features_static*(n_measure+1) + n_measure ; i++){
cn_update += features_update_static[update_offset_static+i].weight ;
}
for ( int i = 0 ; i < n_features_dynamic*(n_measure+1) + n_measure ; i++){
cn_update += features_update_dynamic[update_offset_dynamic+i].weight ;
}
particle_weights[map_idx] = n_measure*dev_config.clutterDensity
+ cn_update - cn_predict
- dev_config.clutterRate ;
}
}
}
// set the merging flags
for ( int j = 0 ; j < n_update_static+n_update_dynamic ; j+=blockDim.x)
{
int feature_idx = j+tid ;
bool is_static = (feature_idx < n_update_static) ;
bool is_dynamic = (feature_idx < n_update_static+n_update_dynamic)
&& !is_static ;
if (is_static)
{
if (features_update_static[update_offset_static+feature_idx].weight<dev_config.minFeatureWeight)
merge_flags_static[update_offset_static+feature_idx] = true ;
else
merge_flags_static[update_offset_static+feature_idx] = false;
}
else if(is_dynamic)
{
feature_idx = feature_idx-n_update_static ;
if (features_update_dynamic[update_offset_dynamic+feature_idx].weight<dev_config.minFeatureWeight)
merge_flags_dynamic[update_offset_dynamic+feature_idx] = true;
else
merge_flags_dynamic[update_offset_dynamic+feature_idx] = false;
}
}
}
}
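/// merge the updated Gaussian mixture of each particle. Blocks stride over
/// the particles; each block repeatedly picks the largest-weight unmerged
/// Gaussian, absorbs all neighbors closer than dev_config.minSeparation
/// (Mahalanobis or Hellinger distance, per dev_config.distanceMetric) into a
/// moment-matched Gaussian, and appends the result to mergedFeatures.
/// mergedSizes receives the number of Gaussians remaining in each map, since
/// mergedFeatures reuses the indexing boundaries of the larger updated
/// feature array and only the first mergedSizes[i] entries are valid.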
template <class GaussianType>
__global__ void
phdUpdateMergeKernel(GaussianType* updated_features,
GaussianType* mergedFeatures, int *mergedSizes,
bool *mergedFlags, int* map_offsets, int n_particles )
{
__shared__ GaussianType maxFeature ;
__shared__ GaussianType mergedFeature ;
__shared__ REAL sdata[256] ;
__shared__ int mergedSize ;
__shared__ int update_offset ;
__shared__ int n_update ;
int tid = threadIdx.x ;
REAL dist ;
GaussianType feature ;
clearGaussian(feature) ;
int dims = getGaussianDim(feature) ;
// loop over particles
for ( int p = 0 ; p < n_particles ; p += gridDim.x )
{
int map_idx = p + blockIdx.x ;
if ( map_idx < n_particles )
{
// initialize shared vars
if ( tid == 0)
{
update_offset = map_offsets[map_idx] ;
n_update = map_offsets[map_idx+1] - map_offsets[map_idx] ;
mergedSize = 0 ;
}
__syncthreads() ;
while(true)
{
// initialize the output values to defaults
if ( tid == 0 )
{
maxFeature.weight = -1 ;
clearGaussian(mergedFeature) ;
}
sdata[tid] = -1 ;
__syncthreads() ;
// find the maximum feature with parallel reduction
for ( int i = update_offset ; i < update_offset + n_update ; i += blockDim.x)
{
int idx = i + tid ;
if ( idx < (update_offset + n_update) )
{
if( !mergedFlags[idx] )
{
if (sdata[tid] == -1 ||
updated_features[(unsigned int)sdata[tid]].weight < updated_features[idx].weight )
{
sdata[tid] = idx ;
}
}
}
}
__syncthreads() ;
for ( int s = blockDim.x/2 ; s > 0 ; s >>= 1 )
{
if ( tid < s )
{
if ( sdata[tid] == -1 )
sdata[tid] = sdata[tid+s] ;
else if ( sdata[tid+s] >= 0 )
{
if(updated_features[(unsigned int)sdata[tid]].weight <
updated_features[(unsigned int)sdata[tid+s]].weight )
{
sdata[tid] = sdata[tid+s] ;
}
}
}
__syncthreads() ;
}
if ( sdata[0] == -1 || maxFeature.weight == 0 )
break ;
else if(tid == 0)
maxFeature = updated_features[ (unsigned int)sdata[0] ] ;
__syncthreads() ;
// find features to merge with max feature
REAL sval0 = 0 ;
// REAL sval1 = 0 ;
// REAL sval2 = 0 ;
clearGaussian(feature) ;
for ( int i = update_offset ; i < update_offset + n_update ; i += blockDim.x )
{
int idx = tid + i ;
if ( idx < update_offset+n_update )
{
if ( !mergedFlags[idx] )
{
if ( dev_config.distanceMetric == 0 )
dist = computeMahalDist(maxFeature, updated_features[idx]) ;
else if ( dev_config.distanceMetric == 1)
dist = computeHellingerDist(maxFeature, updated_features[idx]) ;
if ( dist < dev_config.minSeparation )
{
feature.weight += updated_features[idx].weight ;
for ( int j = 0 ; j < dims ; j++ )
feature.mean[j] += updated_features[idx].weight*updated_features[idx].mean[j] ;
}
}
}
}
// merge means and weights
sval0 = feature.weight ;
sumByReduction(sdata, sval0, tid) ;
if ( tid == 0 )
mergedFeature.weight = sdata[0] ;
__syncthreads() ;
if ( mergedFeature.weight == 0 )
break ;
for ( int j = 0 ; j < dims ; j++ )
{
sval0 = feature.mean[j] ;
sumByReduction(sdata,sval0,tid);
if( tid == 0 )
mergedFeature.mean[j] = sdata[0]/mergedFeature.weight ;
__syncthreads() ;
}
// merge the covariances
sval0 = 0 ;
// sval1 = 0 ;
// sval2 = 0 ;
clearGaussian(feature) ;
for ( int i = update_offset ; i < update_offset+n_update ; i += blockDim.x )
{
int idx = tid + i ;
if ( idx < update_offset+n_update )
{
if (!mergedFlags[idx])
{
if ( dev_config.distanceMetric == 0 )
dist = computeMahalDist(maxFeature, updated_features[idx]) ;
else if ( dev_config.distanceMetric == 1)
dist = computeHellingerDist(maxFeature, updated_features[idx]) ;
if ( dist < dev_config.minSeparation )
{
// use the mean of the local gaussian variable
// to store the innovation vector
for (int j = 0 ; j < dims ; j++)
{
feature.mean[j] = mergedFeature.mean[j]
- updated_features[idx].mean[j] ;
}
for (int j = 0 ; j < dims ; j++ )
{
REAL outer = feature.mean[j] ;
for ( int k = 0 ; k < dims ; k++)
{
REAL inner = feature.mean[k] ;
feature.cov[j*dims+k] +=
updated_features[idx].weight*
(updated_features[idx].cov[j*dims+k]
+ outer*inner) ;
}
}
mergedFlags[idx] = true ;
}
}
}
}
for ( int j = 0 ; j < dims*dims ; j++)
{
sval0 = feature.cov[j] ;
sumByReduction(sdata,sval0,tid);
if ( tid == 0 )
mergedFeature.cov[j] = sdata[0]/mergedFeature.weight ;
__syncthreads() ;
}
if ( tid == 0 )
{
force_symmetric_covariance(mergedFeature) ;
int mergeIdx = update_offset + mergedSize ;
copy_gaussians(mergedFeature,mergedFeatures[mergeIdx]) ;
mergedSize++ ;
}
__syncthreads() ;
}
__syncthreads() ;
// save the merged map size
if ( tid == 0 )
mergedSizes[map_idx] = mergedSize ;
}
} // end loop over particles
return ;
}
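// Note on the merge step above: each cluster of unmerged features lying within
// dev_config.minSeparation of the current maximum-weight feature is moment-matched
// into a single component. Sketch of what the kernel computes:
//   w_merged = sum_i w_i
//   m_merged = (1/w_merged) * sum_i w_i * m_i
//   P_merged = (1/w_merged) * sum_i w_i * ( P_i + (m_merged - m_i)*(m_merged - m_i)' )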
template <class GaussianType>
void
prepareUpdateInputs(vector<vector<GaussianType> > maps,
ConstantVelocityState* dev_poses,
int n_particles, int n_measure,
GaussianType*& dev_maps_inrange,
int*& dev_map_offsets, GaussianType*& dev_maps_updated,
bool*& dev_merged_flags,
vector<GaussianType>& features_in,
vector<GaussianType>& features_out1 ,
vector<GaussianType>& features_out2,
vector<int>& n_in_range_vec,
vector<int>& n_out_range1_vec,
vector<int>& n_out_range2_vec )
{
//------- Variable Declarations ---------//
vector<GaussianType> concat ;
vector<int> map_sizes(n_particles) ;
int nThreads = 0 ;
// map offsets
vector<int> map_offsets_in(n_particles+1,0) ;
vector<int> map_offsets_out(n_particles+1,0) ;
// device variables
GaussianType* dev_maps = NULL ;
int* dev_map_sizes = NULL ;
int* dev_n_in_range = NULL ;
int* dev_n_out_range2 = NULL ;
char* dev_in_range = NULL ;
int total_features = 0 ;
// in/out range book-keeping variables
int n_in_range = 0 ;
int n_out_range = 0 ;
int idx_in = 0 ;
int idx_out = 0 ;
int idx_out2 = 0 ;
int n_out_range1 = 0 ;
int n_out_range2 = 0 ;
vector<char> in_range ;
//------- End Variable Declarations -----//
///////////////////////////////////////////////////////////////////////////
//
// concatenate all the maps together for parallel processing
//
///////////////////////////////////////////////////////////////////////////
for ( unsigned int n = 0 ; n < n_particles ; n++ )
{
concat.insert( concat.end(),
maps[n].begin(),
maps[n].end() ) ;
map_sizes[n] = maps[n].size() ;
// keep track of largest map feature count
if ( map_sizes[n] > nThreads )
nThreads = map_sizes[n] ;
nThreads = min(nThreads,256) ;
total_features += map_sizes[n] ;
}
// allocate device space for map sizes
CUDA_SAFE_CALL(
hipMalloc( (void**)&dev_map_sizes,
n_particles*sizeof(int) ) ) ;
if ( total_features > 0)
{
///////////////////////////////////////////////////////////////////////
//
// split features into in/out range parts
//
///////////////////////////////////////////////////////////////////////
// allocate device memory
CUDA_SAFE_CALL(
hipMalloc( (void**)&dev_maps,
                        total_features*sizeof(GaussianType) ) ) ;
CUDA_SAFE_CALL(
hipMalloc( (void**)&dev_n_in_range,
n_particles*sizeof(int) ) ) ;
CUDA_SAFE_CALL(
hipMalloc( (void**)&dev_n_out_range2,
n_particles*sizeof(int) ) ) ;
CUDA_SAFE_CALL(
hipMalloc( (void**)&dev_in_range,
total_features*sizeof(char) ) ) ;
// copy inputs
CUDA_SAFE_CALL(
hipMemcpy( dev_maps, &concat[0], total_features*sizeof(GaussianType),
hipMemcpyHostToDevice )
) ;
CUDA_SAFE_CALL(
hipMemcpy( dev_map_sizes, &map_sizes[0], n_particles*sizeof(int),
hipMemcpyHostToDevice )
) ;
// kernel launch
DEBUG_MSG("launching computeInRangeKernel") ;
DEBUG_VAL(nThreads) ;
hipLaunchKernelGGL(( computeInRangeKernel), dim3(n_particles),dim3(nThreads), 0, 0,
dev_maps, dev_map_sizes, n_particles, dev_poses, dev_in_range,
dev_n_in_range, dev_n_out_range2 ) ;
CUDA_SAFE_THREAD_SYNC();
// allocate outputs
in_range.resize(total_features);
// copy outputs
CUDA_SAFE_CALL(
hipMemcpy( &in_range[0],dev_in_range,
total_features*sizeof(char),
hipMemcpyDeviceToHost )
) ;
CUDA_SAFE_CALL(
hipMemcpy( &n_in_range_vec[0],dev_n_in_range,n_particles*sizeof(int),
hipMemcpyDeviceToHost )
) ;
CUDA_SAFE_CALL(
hipMemcpy( &n_out_range2_vec[0],dev_n_out_range2,n_particles*sizeof(int),
hipMemcpyDeviceToHost )
) ;
// get total number of in-range features
for ( int i = 0 ; i < n_particles ; i++ )
{
n_in_range += n_in_range_vec[i] ;
n_out_range1_vec[i] = maps[i].size() - n_in_range_vec[i]
- n_out_range2_vec[i] ;
n_out_range2 += n_out_range2_vec[i] ;
}
// divide features into in-range/out-of-range parts
n_out_range = total_features - n_in_range ;
n_out_range1 = n_out_range - n_out_range2 ;
DEBUG_VAL(n_in_range) ;
DEBUG_VAL(n_out_range1) ;
DEBUG_VAL(n_out_range2) ;
features_in.resize(n_in_range) ;
features_out1.resize(n_out_range1) ;
features_out2.resize(n_out_range2) ;
for ( int i = 0 ; i < total_features ; i++ )
{
if (in_range[i] == 1)
features_in[idx_in++] = concat[i] ;
else if (in_range[i] == 2 )
features_out2[idx_out2++] = concat[i] ;
else
features_out1[idx_out++] = concat[i] ;
}
// free memory
        CUDA_SAFE_CALL( hipFree( dev_maps ) ) ;
        CUDA_SAFE_CALL( hipFree( dev_in_range ) ) ;
        CUDA_SAFE_CALL( hipFree( dev_n_in_range ) ) ;
        CUDA_SAFE_CALL( hipFree( dev_n_out_range2 ) ) ;
        CUDA_SAFE_CALL( hipFree( dev_map_sizes ) ) ;
        // perform an exclusive prefix scan on the in-range feature counts to
        // determine indexing offsets for each map (the last element holds the total)
for ( int i = 1 ; i < n_particles+1 ; i++ )
{
map_offsets_in[i] = map_offsets_in[i-1] + n_in_range_vec[i-1] ;
map_offsets_out[i] = map_offsets_out[i-1] + n_in_range_vec[i-1] ;
}
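        // e.g. in-range counts {3, 5, 2} give offsets {0, 3, 8, 10}, so map i
        // occupies indices [offsets[i], offsets[i+1]) of the packed feature array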
}
/************************************************
*
* Prepare PHD update inputs
*
************************************************/
int n_update = n_in_range*(n_measure+1) + n_measure*n_particles ;
// allocate device memory
CUDA_SAFE_CALL(
hipMalloc( (void**)&dev_maps_inrange,
n_in_range*sizeof(GaussianType) ) ) ;
CUDA_SAFE_CALL(
hipMalloc( (void**)&dev_map_offsets,
(n_particles+1)*sizeof(int) ) ) ;
CUDA_SAFE_CALL(
hipMalloc((void**)&dev_maps_updated,
n_update*sizeof(GaussianType)) ) ;
CUDA_SAFE_CALL(
hipMalloc((void**)&dev_merged_flags,
n_update*sizeof(bool)) ) ;
// copy inputs
CUDA_SAFE_CALL(
hipMemcpy( dev_maps_inrange, &features_in[0],
n_in_range*sizeof(GaussianType),
hipMemcpyHostToDevice )
) ;
CUDA_SAFE_CALL( hipMemcpy( dev_map_offsets, &map_offsets_in[0],
(n_particles+1)*sizeof(int),
hipMemcpyHostToDevice ) ) ;
}
template <class GaussianType>
/**
* @brief pruneMap Prune a gaussian mixture.
*
* The elements of dev_maps whose corresponding flag equals true are removed
* and the resulting array is written back into dev_maps. dev_merged_flags is
* also overwritten with an array of the appropriate number of false elements.
* map_sizes is overwritten with the sizes of the pruned maps
*
* @param dev_maps Device pointer to array of gaussian features
* @param dev_merged_flags Device array of boolean flags, true = prune.
* @param map_sizes Vector of map sizes
* @param n_gaussians Total number of gaussians.
* @return Total number of gaussians after pruning
*/
int
pruneMap(GaussianType*& dev_maps,
bool*& dev_merged_flags,
std::vector<int>& map_sizes,
int n_gaussians){
// wrap pointers in thrust types
thrust::device_ptr<GaussianType> ptr_maps(dev_maps) ;
thrust::device_ptr<bool> ptr_flags(dev_merged_flags) ;
// create the output vector, with same size as the input
thrust::device_vector<GaussianType> dev_pruned(n_gaussians) ;
// do the pruning
thrust::remove_copy_if(ptr_maps,ptr_maps+n_gaussians,
ptr_flags,
dev_pruned.begin(),
thrust::identity<bool>()) ;
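    // the flags act as a stencil here: entries whose flag is true under
    // thrust::identity<bool> are skipped, so only unmerged features are copied
    // into dev_pruned, preserving their original order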
// recalculate map sizes
int n_particles = map_sizes.size() ;
std::vector<int> map_sizes_pruned(n_particles,0) ;
host_vector<bool> flags(ptr_flags,ptr_flags+n_gaussians) ;
int n = 0 ;
int n_pruned = 0 ;
for ( int i = 0 ; i < n_particles ; i++){
for( int j = 0 ; j < map_sizes[i] ; j++){
if (!flags[n++]){
map_sizes_pruned[i]++ ;
n_pruned++ ;
}
}
}
// cout << "pruned features: " << endl ;
// for ( int i = 0 ; i < n_pruned ; i++ ){
// GaussianType g = dev_pruned[i] ;
// print_feature(g) ;
// }
// store pruned results
thrust::device_free(ptr_maps) ;
ptr_maps = thrust::device_malloc<GaussianType>(n_pruned) ;
thrust::copy_n(dev_pruned.begin(),n_pruned,ptr_maps) ;
dev_maps = raw_pointer_cast(ptr_maps) ;
thrust::device_free(ptr_flags) ;
ptr_flags = thrust::device_malloc<bool>(n_pruned) ;
thrust::fill(ptr_flags,ptr_flags+n_pruned,false) ;
dev_merged_flags = raw_pointer_cast(ptr_flags) ;
map_sizes = map_sizes_pruned ;
return n_pruned ;
}
template <class GaussianType>
void
mergeAndCopyMaps(GaussianType*& dev_maps_updated,
bool*& dev_merged_flags,
vector<GaussianType> features_out1,
vector<GaussianType> features_out2,
vector<int> n_in_range_vec,
vector<int> n_out_range1_vec,
vector<int> n_out_range2_vec,
int n_particles, int n_measure, int n_update,
vector<vector<GaussianType> >& maps_output )
{
vector<int> map_offsets(n_particles+1) ;
size_t combined_size ;
GaussianType* maps_merged = NULL ;
int* map_sizes_merged = NULL ;
int offset = 0 ;
int offset_updated = 0 ;
int offset_out = 0 ;
// device variables
GaussianType* dev_maps_merged = NULL ;
GaussianType* dev_maps_combined = NULL ;
bool* dev_merged_flags_combined = NULL ;
int* dev_n_merged = NULL ;
int* dev_map_offsets = NULL ;
int n_out_range1 = features_out1.size() ;
int n_out_range2 = features_out2.size() ;
// prune low-weighted features
DEBUG_VAL(n_update) ;
vector<int> map_sizes_inrange(n_particles) ;
for ( int n = 0 ; n < n_particles ; n++){
map_sizes_inrange[n] = n_in_range_vec[n]*(n_measure+1) + n_measure ;
}
int n_pruned = pruneMap(dev_maps_updated,dev_merged_flags,
map_sizes_inrange,n_update) ;
DEBUG_VAL(n_pruned) ;
    // recombine updated in-range map with the nearly-in-range map and do merging
DEBUG_MSG("Recombining maps") ;
combined_size = (n_pruned+n_out_range2)*sizeof(GaussianType) ;
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_maps_combined, combined_size ) ) ;
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_merged_flags_combined,
(n_pruned+n_out_range2)*sizeof(bool) ) ) ;
map_offsets[0] = 0 ;
for ( int n = 0 ; n < n_particles ; n++ )
{
// in-range map for particle n
int n_in_range_n = map_sizes_inrange[n] ;
CUDA_SAFE_CALL( hipMemcpy( dev_maps_combined+offset,
dev_maps_updated+offset_updated,
n_in_range_n*sizeof(GaussianType),
hipMemcpyDeviceToDevice) ) ;
CUDA_SAFE_CALL( hipMemcpy( dev_merged_flags_combined+offset,
dev_merged_flags+offset_updated,
                                   n_in_range_n*sizeof(bool),
                                   hipMemcpyDeviceToDevice ) ) ;
offset += n_in_range_n ;
offset_updated += n_in_range_n ;
// nearly in range map for particle n
vector<char> merged_flags_out(n_out_range2_vec[n],0) ;
CUDA_SAFE_CALL( hipMemcpy( dev_maps_combined+offset,
&features_out2[offset_out],
n_out_range2_vec[n]*sizeof(GaussianType),
hipMemcpyHostToDevice ) ) ;
CUDA_SAFE_CALL( hipMemcpy( dev_merged_flags_combined+offset,
&merged_flags_out[0],
n_out_range2_vec[n]*sizeof(bool),
hipMemcpyHostToDevice) ) ;
offset += n_out_range2_vec[n] ;
offset_out += n_out_range2_vec[n] ;
map_offsets[n+1] = offset ;
}
DEBUG_VAL(combined_size) ;
CUDA_SAFE_CALL( hipMalloc((void**)&dev_maps_merged,
combined_size ) ) ;
CUDA_SAFE_CALL( hipMalloc((void**)&dev_n_merged,
n_particles*sizeof(int) ) ) ;
CUDA_SAFE_CALL( hipMalloc((void**)&dev_map_offsets,
(n_particles+1)*sizeof(int) ) ) ;
CUDA_SAFE_CALL( hipMemcpy( dev_map_offsets, &map_offsets[0],
(n_particles+1)*sizeof(int),
hipMemcpyHostToDevice ) ) ;
CUDA_SAFE_THREAD_SYNC() ;
thrust::device_ptr<bool> ptr_flags(dev_merged_flags_combined) ;
thrust::fill(ptr_flags, ptr_flags+n_pruned+n_out_range2,false) ;
DEBUG_MSG("launching phdUpdateMergeKernel") ;
hipLaunchKernelGGL(( phdUpdateMergeKernel), dim3(n_particles),dim3(256), 0, 0,
dev_maps_combined, dev_maps_merged, dev_n_merged,
dev_merged_flags_combined, dev_map_offsets, n_particles ) ;
CUDA_SAFE_THREAD_SYNC() ;
// // copy one feature and look at it
// GaussianType feature_test ;
// CUDA_SAFE_CALL(hipMemcpy(&feature_test,dev_maps_merged,sizeof(GaussianType),hipMemcpyDeviceToHost) ) ;
// cout << "first merged feature: " << endl ;
// print_feature(feature_test) ;
// allocate outputs
DEBUG_MSG("Allocating update and merge outputs") ;
maps_merged = (GaussianType*)malloc( combined_size ) ;
map_sizes_merged = (int*)malloc( n_particles*sizeof(int) ) ;
// copy outputs
CUDA_SAFE_CALL(
hipMemcpy( maps_merged, dev_maps_merged,
combined_size,
hipMemcpyDeviceToHost ) ) ;
CUDA_SAFE_CALL(
hipMemcpy( map_sizes_merged, dev_n_merged,
n_particles*sizeof(int),
hipMemcpyDeviceToHost ) ) ;
offset_updated = 0 ;
offset_out = 0 ;
for ( int i = 0 ; i < n_particles ; i++ )
{
offset_updated = map_offsets[i] ;
// DEBUG_VAL(map_sizes_merged[i]) ;
maps_output[i].assign(maps_merged+offset_updated,
maps_merged+offset_updated+map_sizes_merged[i]) ;
// recombine with out-of-range features, if any
if ( n_out_range1 > 0 && n_out_range1_vec[i] > 0 )
{
maps_output[i].insert( maps_output[i].end(),
features_out1.begin()+offset_out,
features_out1.begin()+offset_out+n_out_range1_vec[i] ) ;
offset_out += n_out_range1_vec[i] ;
}
// cout << "Merged map " << i << endl ;
// for ( int j = 0 ; j < maps_output[i].size() ; j++ ){
// print_feature(maps_output[i][j]) ;
// }
}
free(maps_merged) ;
free(map_sizes_merged) ;
CUDA_SAFE_CALL( hipFree( dev_maps_combined ) ) ;
CUDA_SAFE_CALL( hipFree( dev_maps_merged ) ) ;
CUDA_SAFE_CALL( hipFree( dev_merged_flags_combined ) ) ;
CUDA_SAFE_CALL( hipFree( dev_n_merged ) ) ;
CUDA_SAFE_CALL( hipFree( dev_maps_updated) ) ;
CUDA_SAFE_CALL( hipFree( dev_merged_flags) ) ;
}
SynthSLAM
phdUpdateSynth(SynthSLAM& particles, measurementSet measurements)
{
//------- Variable Declarations ---------//
int n_measure = 0 ;
int n_particles = particles.n_particles ;
DEBUG_VAL(n_particles) ;
vector<int> map_sizes_static(n_particles,0) ;
vector<int> map_sizes_dynamic(n_particles,0) ;
// map offsets
vector<int> map_offsets_in_static(n_particles+1,0) ;
vector<int> map_offsets_out_static(n_particles+1,0) ;
SynthSLAM particlesPreMerge(particles) ;
// device variables
ConstantVelocityState* dev_poses = NULL ;
int *dev_map_offsets_static = NULL ;
int *dev_map_offsets_dynamic = NULL ;
Gaussian2D* dev_maps_inrange_static = NULL ;
Gaussian4D* dev_maps_inrange_dynamic = NULL ;
Gaussian2D* dev_maps_updated_static = NULL ;
Gaussian4D* dev_maps_updated_dynamic = NULL ;
REAL* dev_particle_weights = NULL ;
bool* dev_merged_flags_static = NULL ;
bool* dev_merged_flags_dynamic = NULL ;
// in/out range book-keeping variables
vector<char> in_range ;
vector<int> n_in_range_vec_static(n_particles,0) ;
vector<int> n_in_range_vec_dynamic(n_particles,0) ;
vector<int> n_out_range1_vec_static(n_particles,0) ;
vector<int> n_out_range1_vec_dynamic(n_particles,0) ;
vector<int> n_out_range2_vec_static(n_particles,0) ;
vector<int> n_out_range2_vec_dynamic(n_particles,0) ;
vector<Gaussian2D> features_in_static ;
vector<Gaussian2D> features_out1_static ;
vector<Gaussian2D> features_out2_static ;
vector<Gaussian4D> features_in_dynamic ;
vector<Gaussian4D> features_out1_dynamic ;
vector<Gaussian4D> features_out2_dynamic ;
// output variables
//------- End Variable Declarations -----//
// make a copy of the particles
particlesPreMerge = particles ;
// check for memory limit for storing measurements in constant mem
n_measure = measurements.size() ;
if ( n_measure > 256 )
{
DEBUG_MSG("Warning: maximum number of measurements per time step exceeded") ;
n_measure = 256 ;
}
DEBUG_VAL(n_measure) ;
// copy measurements to device
CUDA_SAFE_CALL(
hipMemcpyToSymbol( Z, &measurements[0],
n_measure*sizeof(RangeBearingMeasurement) ) ) ;
// copy particle poses to device
CUDA_SAFE_CALL(
hipMalloc( (void**)&dev_poses,
n_particles*sizeof(ConstantVelocityState) ) ) ;
CUDA_SAFE_CALL(
hipMemcpy(dev_poses,&particles.states[0],
n_particles*sizeof(ConstantVelocityState),
hipMemcpyHostToDevice) ) ;
// extract in-range portions of maps, and allocate output arrays
if(config.featureModel==STATIC_MODEL
|| config.featureModel==MIXED_MODEL)
{
prepareUpdateInputs( particles.maps_static,
dev_poses, n_particles, n_measure,
dev_maps_inrange_static, dev_map_offsets_static,
dev_maps_updated_static, dev_merged_flags_static,
features_in_static, features_out1_static,
features_out2_static, n_in_range_vec_static,
n_out_range1_vec_static, n_out_range2_vec_static) ;
}
if(config.featureModel == DYNAMIC_MODEL
|| config.featureModel == MIXED_MODEL)
{
prepareUpdateInputs( particles.maps_dynamic,
dev_poses, n_particles, n_measure,
dev_maps_inrange_dynamic, dev_map_offsets_dynamic,
dev_maps_updated_dynamic, dev_merged_flags_dynamic,
features_in_dynamic, features_out1_dynamic,
features_out2_dynamic, n_in_range_vec_dynamic,
n_out_range1_vec_dynamic,n_out_range2_vec_dynamic) ;
}
// allocate arrays for particle weight update
CUDA_SAFE_CALL(
hipMalloc((void**)&dev_particle_weights,
n_particles*sizeof(REAL) ) ) ;
// launch kernel
int nBlocks = min(n_particles,32768) ;
int n_update_static = features_in_static.size()*(n_measure+1)
+ n_measure*n_particles ;
int n_update_dynamic = features_in_dynamic.size()*(n_measure+1)
+ n_measure*n_particles ;
cudaPrintfInit(4194304) ;
if(config.featureModel == MIXED_MODEL)
{
DEBUG_MSG("launching phdUpdateKernelMixed") ;
hipLaunchKernelGGL(( phdUpdateKernelMixed), dim3(nBlocks),dim3(256), 0, 0,
dev_poses, dev_maps_inrange_static, dev_maps_inrange_dynamic,
dev_map_offsets_static, dev_map_offsets_dynamic,
n_particles,n_measure,
dev_maps_updated_static, dev_maps_updated_dynamic,
dev_merged_flags_static, dev_merged_flags_dynamic,
dev_particle_weights);
CUDA_SAFE_THREAD_SYNC() ;
CUDA_SAFE_CALL( hipFree( dev_maps_inrange_dynamic ) ) ;
CUDA_SAFE_CALL( hipFree( dev_map_offsets_dynamic ) ) ;
}
else if(config.featureModel==STATIC_MODEL)
{
DEBUG_MSG("Computing Birth terms") ;
int n_births = n_particles*n_measure ;
vector<Gaussian2D> births(n_births) ;
for ( int i = 0 ; i < n_particles ; i++){
ConstantVelocityState pose = particles.states[i] ;
for( int j = 0 ; j < n_measure ; j++){
int idx = i*n_measure + j ;
RangeBearingMeasurement z = measurements[j] ;
// invert measurement
REAL theta = pose.ptheta + z.bearing ;
REAL dx = z.range*cos(theta) ;
REAL dy = z.range*sin(theta) ;
births[idx].mean[0] = pose.px + dx ;
births[idx].mean[1] = pose.py + dy ;
// inverse measurement jacobian
REAL J[4] ;
J[0] = dx/z.range ;
J[1] = dy/z.range ;
J[2] = -dy ;
J[3] = dx ;
// measurement noise
REAL var_range = pow(config.stdRange*config.birthNoiseFactor,2) ;
REAL var_bearing = pow(config.stdBearing*config.birthNoiseFactor,2) ;
// compute birth covariance
births[idx].cov[0] = pow(J[0],2)*var_range +
pow(J[2],2)*var_bearing ;
births[idx].cov[1] = J[0]*J[1]*var_range +
J[2]*J[3]*var_bearing ;
births[idx].cov[2] =
births[idx].cov[1] ;
births[idx].cov[3] = pow(J[1],2)*var_range +
pow(J[3],2)*var_bearing ;
// set birth weight
if(z.label==STATIC_MEASUREMENT || !config.labeledMeasurements)
births[idx].weight = safeLog(config.birthWeight) ;
else
births[idx].weight = safeLog(0) ;
// print_feature(births[idx]) ;
}
}
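        // Sketch of the inverse-measurement birth above: with theta = pose.ptheta + bearing,
        // the birth mean is [px + r*cos(theta), py + r*sin(theta)], J is the Jacobian of
        // that mapping w.r.t. (range, bearing), and the birth covariance is
        // J * diag(var_range, var_bearing) * J'.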
Gaussian2D* dev_births = NULL ;
CUDA_SAFE_CALL(hipMalloc(
(void**)&dev_births,
n_births*sizeof(Gaussian2D))) ;
CUDA_SAFE_CALL(hipMemcpy(
dev_births,&births[0],
n_births*sizeof(Gaussian2D),
hipMemcpyHostToDevice)) ;
DEBUG_MSG("Computing PHD preupdate") ;
// allocate device memory for pre-updated features
int n_features_total = features_in_static.size() ;
int n_preupdate = n_features_total*n_measure ;
DEBUG_VAL(n_preupdate) ;
Gaussian2D* dev_features_preupdate = NULL ;
CUDA_SAFE_CALL(hipMalloc((void**)&dev_features_preupdate,
n_preupdate*sizeof(Gaussian2D))) ;
// create pose index vector
vector<int> pose_idx ;
for (int i = 0 ; i < n_particles ; i++){
pose_idx.insert(pose_idx.end(),n_in_range_vec_static[i],i) ;
}
// for ( int i = 0 ; i < pose_idx.size() ; i++){
// DEBUG_VAL(pose_idx[i]) ;
// }
int* dev_pose_idx = NULL ;
CUDA_SAFE_CALL(hipMalloc((void**)&dev_pose_idx,
n_features_total*sizeof(int))) ;
CUDA_SAFE_CALL(hipMemcpy(dev_pose_idx,&pose_idx[0],
n_features_total*sizeof(int),
hipMemcpyHostToDevice)) ;
// create pd vector
REAL* dev_features_pd = NULL ;
CUDA_SAFE_CALL(hipMalloc((void**)&dev_features_pd,
n_features_total*sizeof(REAL))) ;
// call the preupdate kernel
nBlocks = min(int(ceil(n_features_total/256.0)),65535) ;
DEBUG_VAL(nBlocks) ;
hipLaunchKernelGGL(( preUpdateSynthKernel), dim3(nBlocks),dim3(256), 0, 0,
dev_poses,dev_pose_idx,dev_maps_inrange_static,
dev_features_pd,n_features_total,
n_measure,dev_features_preupdate) ;
CUDA_SAFE_THREAD_SYNC() ;
// // check preupdate terms
// thrust::device_ptr<Gaussian2D> ptr_preupdate(dev_features_preupdate) ;
// thrust::device_vector<Gaussian2D> dev_preupdate(ptr_preupdate,ptr_preupdate+n_preupdate) ;
// thrust::host_vector<Gaussian2D> preupdate(dev_preupdate) ;
// for ( int i = 0 ; i < preupdate.size() ; i++){
// Gaussian2D g = preupdate[i] ;
// print_feature(g) ;
// }
DEBUG_MSG("launching phdUpdateKernel Static") ;
nBlocks = min(n_particles,65535) ;
hipLaunchKernelGGL(( phdUpdateKernel), dim3(nBlocks),dim3(256), 0, 0,
dev_maps_inrange_static, dev_features_pd, dev_features_preupdate,
dev_births, dev_map_offsets_static,n_particles,n_measure,
dev_maps_updated_static,dev_merged_flags_static,
dev_particle_weights ) ;
CUDA_SAFE_THREAD_SYNC() ;
hipFree(dev_births) ;
hipFree(dev_pose_idx) ;
hipFree(dev_features_preupdate) ;
hipFree(dev_features_pd) ;
// // check update terms
// thrust::device_ptr<Gaussian2D> ptr_update(dev_maps_updated_static) ;
// thrust::host_vector<Gaussian2D> update(ptr_update, ptr_update+n_preupdate+n_births+n_features_total) ;
// for ( int i = 0 ; i < update.size() ; i++)
// print_feature(update[i]) ;
}
else if(config.featureModel==DYNAMIC_MODEL)
{
DEBUG_MSG("launching phdUpdateKernel Dynamic") ;
// hipLaunchKernelGGL(( phdUpdateKernel), dim3(nBlocks),dim3(256), 0, 0,
// dev_poses, dev_maps_inrange_dynamic,dev_map_offsets_dynamic,
// n_particles,n_measure,dev_maps_updated_dynamic,
// dev_merged_flags_dynamic,dev_particle_weights) ;
CUDA_SAFE_THREAD_SYNC() ;
}
cudaPrintfDisplay(stdout,false) ;
cudaPrintfEnd();
CUDA_SAFE_CALL( hipFree( dev_maps_inrange_static ) ) ;
CUDA_SAFE_CALL( hipFree( dev_map_offsets_static ) ) ;
// // check input weights against merge flags
// cout << "DEBUG first updated dynamic feature" << endl ;
// bool* merged_flags = (bool*)malloc(n_update_dynamic*sizeof(bool)) ;
// Gaussian4D* maps_updated = (Gaussian4D*)malloc( n_update_dynamic*sizeof(Gaussian4D) ) ;
// hipMemcpy( merged_flags, dev_merged_flags_dynamic, n_update_dynamic*sizeof(bool),hipMemcpyDeviceToHost) ;
// CUDA_SAFE_CALL(
// hipMemcpy( maps_updated, dev_maps_updated_dynamic,
// n_update_dynamic*sizeof(Gaussian4D),
// hipMemcpyDeviceToHost ) ) ;
// for (int j = 0 ; j < n_update_dynamic ; j++)
// {
// cout << "(" << maps_updated[j].weight << " | " << merged_flags[j] << ")" << endl ;
// }
// print_feature(maps_updated[0]) ;
// print_feature(maps_updated[1]) ;
// free(maps_updated) ;
// free(merged_flags) ;
/******************************************************
*
* Merge updated maps and copy back to host
*
******************************************************/
if(config.featureModel==STATIC_MODEL || config.featureModel==MIXED_MODEL)
{
mergeAndCopyMaps( dev_maps_updated_static,dev_merged_flags_static,
features_out1_static,
features_out2_static, n_in_range_vec_static,
n_out_range1_vec_static,
n_out_range2_vec_static, n_particles,
n_measure,n_update_static, particles.maps_static ) ;
}
if(config.featureModel==DYNAMIC_MODEL || config.featureModel==MIXED_MODEL)
{
mergeAndCopyMaps( dev_maps_updated_dynamic,dev_merged_flags_dynamic,
features_out1_dynamic,
features_out2_dynamic, n_in_range_vec_dynamic,
n_out_range1_vec_dynamic,
n_out_range2_vec_dynamic, n_particles,
n_measure,n_update_dynamic,particles.maps_dynamic ) ;
}
/**********************************************************
*
* Update particle weights
*
*********************************************************/
DEBUG_MSG("Updating Particle Weights") ;
REAL* particle_weights = (REAL*)malloc(n_particles*sizeof(REAL)) ;
CUDA_SAFE_CALL( hipMemcpy(particle_weights,dev_particle_weights,
n_particles*sizeof(REAL),
hipMemcpyDeviceToHost ) ) ;
    // add the log multi-object likelihood to the (log) particle weights
for ( int i = 0 ; i < n_particles ; i++ )
{
particles.weights[i] += particle_weights[i] ;
}
// normalize
REAL weightSum = logSumExp(particles.weights) ;
DEBUG_VAL(weightSum) ;
for (int i = 0 ; i < n_particles ; i++ )
{
particles.weights[i] -= weightSum ;
// DEBUG_VAL(particles.weights[i]) ;
}
// free memory
CUDA_SAFE_CALL( hipFree( dev_particle_weights ) ) ;
free(particle_weights) ;
CUDA_SAFE_CALL( hipFree( dev_poses ) ) ;
return particlesPreMerge ;
}
//SmcPhdSLAM
//phdUpdate(SmcPhdSLAM& slam, measurementSet measurements)
//{
// SmcPhdStatic maps_static_concat ;
// SmcPhdDynamic maps_dynamic_concat ;
// vector<int> map_sizes_static ;
// vector<int> map_sizes_dynamic ;
// // count map sizes
// int n_particles = slam.n_particles ;
// for (int n = 0 ; n < n_particles ; n++ )
// {
// map_sizes_static.push_back(slam.maps_static[n].x.size());
// map_sizes_dynamic.push_back(slam.maps_dynamic[n].x.size());
// }
//}
template <class GaussianType>
vector<GaussianType> computeExpectedMap(vector<vector <GaussianType> > maps,
vector<REAL> weights)
// concatenate all particle maps into a single set of weighted gaussians and then
// merge them pairwise with the existing gaussian merging kernel
{
DEBUG_MSG("Computing Expected Map") ;
vector<GaussianType> concat ;
int n_particles = maps.size() ;
int* merged_sizes = (int*)malloc(n_particles*sizeof(int)) ;
int* map_sizes = (int*)malloc(n_particles*sizeof(int)) ;
int total_features = 0 ;
for ( int n = 0 ; n < n_particles ; n++ )
{
vector<GaussianType> map = maps[n] ;
for ( int i = 0 ; i < map.size() ; i++ )
map[i].weight *= exp(weights[n]) ;
concat.insert( concat.end(), map.begin(), map.end() ) ;
merged_sizes[n] = map.size() ;
total_features += map.size() ;
}
if ( total_features == 0 )
{
DEBUG_MSG("no features") ;
vector<GaussianType> expected_map(0) ;
return expected_map ;
}
GaussianType* all_features = (GaussianType*)malloc( total_features*sizeof(GaussianType) ) ;
std::copy( concat.begin(), concat.end(), all_features ) ;
    bool* merged_flags = (bool*)malloc( total_features*sizeof(bool) ) ;
std::fill( merged_flags, merged_flags+total_features, false ) ;
GaussianType* maps_out = (GaussianType*)malloc( total_features*sizeof(GaussianType) ) ;
GaussianType* dev_maps_in = NULL ;
GaussianType* dev_maps_out = NULL ;
int* dev_merged_sizes = NULL ;
bool* dev_merged_flags = NULL ;
int* dev_map_sizes = NULL ;
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_maps_in,
total_features*sizeof(GaussianType) ) ) ;
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_maps_out,
total_features*sizeof(GaussianType) ) ) ;
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_merged_sizes,
n_particles*sizeof(int) ) ) ;
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_map_sizes,
n_particles*sizeof(int) ) ) ;
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_merged_flags,
total_features*sizeof(bool) ) ) ;
for ( int n = n_particles/2 ; n > 0 ; n >>= 1 )
{
DEBUG_VAL(n) ;
for ( int i = 0 ; i < n ; i++ )
map_sizes[i] = merged_sizes[2*i] + merged_sizes[2*i+1] ;
CUDA_SAFE_CALL( hipMemcpy( dev_map_sizes, map_sizes,
n*sizeof(int),
hipMemcpyHostToDevice ) ) ;
CUDA_SAFE_CALL( hipMemcpy( dev_maps_in, all_features,
total_features*sizeof(GaussianType),
hipMemcpyHostToDevice) ) ;
CUDA_SAFE_CALL( hipMemcpy( dev_merged_flags, merged_flags,
total_features*sizeof(bool),
hipMemcpyHostToDevice)) ;
// kernel launch
hipLaunchKernelGGL(( phdUpdateMergeKernel), dim3(n),dim3(256), 0, 0,
dev_maps_in, dev_maps_out, dev_merged_sizes,
dev_merged_flags, dev_map_sizes, n ) ;
CUDA_SAFE_CALL( hipMemcpy( maps_out, dev_maps_out,
total_features*sizeof(GaussianType),
hipMemcpyDeviceToHost) ) ;
CUDA_SAFE_CALL( hipMemcpy( merged_sizes, dev_merged_sizes,
n*sizeof(int), hipMemcpyDeviceToHost ) ) ;
int offset_in = 0 ;
int offset_out = 0 ;
for ( int i = 0 ; i < n ; i++ )
{
int n_copy = merged_sizes[i] ;
std::copy( maps_out+offset_out, maps_out+offset_out+n_copy,
all_features+offset_in) ;
offset_out += map_sizes[i] ;
offset_in += n_copy ;
}
total_features = offset_in ;
}
vector<GaussianType> expected_map(total_features) ;
std::copy( all_features,all_features+total_features, expected_map.begin() ) ;
CUDA_SAFE_CALL( hipFree( dev_maps_in ) ) ;
CUDA_SAFE_CALL( hipFree( dev_maps_out ) ) ;
CUDA_SAFE_CALL( hipFree( dev_merged_sizes ) ) ;
CUDA_SAFE_CALL( hipFree( dev_merged_flags ) ) ;
CUDA_SAFE_CALL( hipFree( dev_map_sizes ) ) ;
free(all_features) ;
free(merged_flags) ;
free(maps_out) ;
return expected_map ;
}
template<class GaussianType>
bool expectedFeaturesPredicate( GaussianType g )
{
return (g.weight <= config.minExpectedFeatureWeight) ;
}
/// copy the configuration structure to constant device memory
void
setDeviceConfig( const SlamConfig& config )
{
CUDA_SAFE_CALL(hipMemcpyToSymbol( dev_config, &config, sizeof(SlamConfig) ) ) ;
// seed_rng();
}
///////////////////////////////////////////////////////////////////
__host__ __device__ void
transformCameraToWorld(REAL xCamera, REAL yCamera, REAL zCamera,
CameraState cam,
REAL& xWorld, REAL& yWorld, REAL& zWorld,
bool isPoint=true){
REAL croll = cos(cam.pose.proll) ;
REAL cpitch = cos(cam.pose.ppitch) ;
REAL cyaw = cos(cam.pose.pyaw) ;
REAL sroll = sin(cam.pose.proll) ;
REAL spitch = sin(cam.pose.ppitch) ;
REAL syaw = sin(cam.pose.pyaw) ;
xWorld = xCamera*(cpitch*cyaw) +
yCamera*(croll*syaw + sroll*spitch*cyaw) +
zCamera*(sroll*syaw - croll*spitch*cyaw) ;
yWorld = xCamera*(-cpitch*syaw) +
yCamera*(croll*cyaw - sroll*spitch*syaw) +
zCamera*(sroll*cyaw + croll*spitch*syaw) ;
zWorld = xCamera*(spitch) +
yCamera*(-sroll*cpitch) +
zCamera*(croll*cpitch) ;
if(isPoint){
xWorld += cam.pose.px ;
yWorld += cam.pose.py ;
zWorld += cam.pose.pz ;
}
}
__host__ __device__ void
transformWorldToCamera(REAL xWorld, REAL yWorld, REAL zWorld,
CameraState cam,
REAL& xCamera, REAL& yCamera, REAL& zCamera,
bool isPoint=true){
REAL croll = cos(cam.pose.proll) ;
REAL cpitch = cos(cam.pose.ppitch) ;
REAL cyaw = cos(cam.pose.pyaw) ;
REAL sroll = sin(cam.pose.proll) ;
REAL spitch = sin(cam.pose.ppitch) ;
REAL syaw = sin(cam.pose.pyaw) ;
xCamera = xWorld*(cpitch*cyaw) +
yWorld*(-cpitch*syaw) +
zWorld*(spitch) ;
yCamera = xWorld*(croll*syaw + sroll*spitch*cyaw) +
yWorld*(croll*cyaw - sroll*spitch*syaw) +
zWorld*(-sroll*cpitch) ;
zCamera = (xWorld)*(sroll*syaw - croll*spitch*cyaw) +
(yWorld)*(sroll*cyaw + croll*spitch*syaw) +
(zWorld)*(croll*cpitch) ;
if(isPoint){
xCamera += -cam.pose.px*(cpitch*cyaw) -
cam.pose.py*(-cpitch*syaw) -
cam.pose.pz*spitch ;
yCamera += -cam.pose.px*(croll*syaw + sroll*spitch*cyaw) -
cam.pose.py*(croll*cyaw - sroll*spitch*syaw) -
cam.pose.pz*(-sroll*cpitch) ;
zCamera += -cam.pose.px*(sroll*syaw - croll*spitch*cyaw) -
cam.pose.py*(sroll*cyaw + croll*spitch*syaw) -
cam.pose.pz*(croll*cpitch) ;
}
}
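// transformCameraToWorld and transformWorldToCamera apply a rotation matrix and its
// transpose respectively (plus/minus the camera translation when isPoint is true),
// so composing one with the other recovers the original coordinates. Minimal
// round-trip sketch (hypothetical values):
//   REAL xw, yw, zw, xc, yc, zc ;
//   transformCameraToWorld(1.0, 2.0, 3.0, cam, xw, yw, zw) ;
//   transformWorldToCamera(xw, yw, zw, cam, xc, yc, zc) ; // xc,yc,zc == 1,2,3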
/// functor for use with thrust::for_each to convert particles in euclidean
/// space to particles in disparity space (baseline = 1)
/** pass a vector of camera states to the constructor
* the argument to the functor is an 8-element tuple, where each element is a
* vector with one element per feature particle. The first four elements are
inputs:
* idx: index to the vector of camera states indicating to which camera
this particle belongs
x: x-coordinate of the particle
y: y-coordinate of the particle
z: z-coordinate of the particle
The last 4 elements are outputs computed by the functor:
u: u-coordinate of particle in disparity space
v: v-coordinate of particle in disparity space
d: disparity value of particle in disparity space
      in_range: 1 if the particle is visible to the camera, 0 otherwise
**/
struct world_to_disparity_transform{
const CameraState* camera_states ;
world_to_disparity_transform(CameraState* _states) : camera_states(_states) {}
template <typename Tuple>
__host__ __device__ void
operator()(Tuple t){
using namespace thrust ;
CameraState cam = camera_states[get<0>(t)] ;
REAL x = get<1>(t) ;
REAL y = get<2>(t) ;
REAL z = get<3>(t) ;
REAL xCamera = 0 ;
REAL yCamera = 0 ;
REAL zCamera = 0 ;
transformWorldToCamera(x,y,z,cam,xCamera,yCamera,zCamera) ;
get<4>(t) = cam.u0 - cam.fx*xCamera/zCamera ;
get<5>(t) = cam.v0 - cam.fy*yCamera/zCamera ;
get<6>(t) = -cam.fx/zCamera ;
bool in_fov = (get<4>(t) > 0) &&
(get<4>(t) < dev_config.imageWidth) &&
(get<5>(t) > 0) &&
(get<5>(t) < dev_config.imageHeight) &&
(get<6>(t) >= 0);
get<7>(t) = in_fov ? 1 : 0 ;
}
};
/// functor for use with thrust::for_each to convert particles in disparity
/// space to particles in euclidean space (baseline = 1)
/** pass a vector of camera states to the constructor
 * the argument to the functor is a 7-element tuple, where each element is a
* vector with one element per feature particle. The first four elements are
inputs:
* idx: index to the vector of camera states indicating to which camera
this particle belongs
u: u-coordinate of particle in disparity space
v: v-coordinate of particle in disparity space
d: disparity value of particle in disparity space
The last 3 elements are outputs computed by the functor:
x: x-coordinate of the particle
y: y-coordinate of the particle
z: z-coordinate of the particle
**/
struct disparity_to_world_transform{
const CameraState* camera_states ;
disparity_to_world_transform(CameraState* _states) : camera_states(_states) {}
template <typename Tuple>
__host__ __device__ void
operator()(Tuple t){
CameraState cam = camera_states[get<0>(t)] ;
REAL u = get<1>(t) ;
REAL v = get<2>(t) ;
REAL d = get<3>(t) ;
REAL xCamera = (u-cam.u0)/d ;
REAL yCamera = cam.fx/cam.fy*(v-cam.v0)/d ;
REAL zCamera = -cam.fx/d ;
transformCameraToWorld(xCamera,yCamera,zCamera,cam,
get<4>(t),get<5>(t),get<6>(t)) ;
}
};
/// this is a binary function which returns the sum of two numerical values
/// divided by an integer N. This can be used to compute the arithmetic mean of
/// N numbers by reduction.
struct compute_mean_function{
const int N ;
compute_mean_function(int _n) : N(_n) {}
template <typename T>
__host__ __device__ REAL
operator()(T x, T y){
return (REAL)(x+y)/(REAL)N ;
}
};
/// unary operator which multiplies the argument by a constant
template <typename T>
struct multiply_by : public thrust::unary_function<T,T>
{
const T N ;
multiply_by(T _n) : N(_n) {}
__host__ __device__ T
operator()(T x){return x*N ;}
};
/// unary operator which divides the argument by a constant
template <typename T>
struct divide_by : public thrust::unary_function<T,T>
{
const T N ;
divide_by(T _n) : N(_n) {}
__host__ __device__ T
operator()(T x){return x/N ;}
};
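/// Example use of the functors above (sketch, hypothetical call): scale a vector
/// of per-feature detection probabilities by the nominal pd, e.g.
///   thrust::transform(pd.begin(), pd.end(), pd.begin(), multiply_by<REAL>(config.pd)) ;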
/// unary operator which returns the weight of a gaussian object
template <typename T>
struct get_weight : public thrust::unary_function<T,REAL>
{
__device__ REAL
operator()(T g){ return g.weight; }
} ;
struct gt0 : public thrust::unary_function<REAL,bool>
{
__host__ __device__ bool
operator()(const REAL x){return (x>0);}
} ;
struct leq0 : public thrust::unary_function<REAL,bool>
{
__host__ __device__ bool
operator()(const REAL x){return (x<=0);}
} ;
// create predicate for testing feature visibility
struct is_inrange : public thrust::unary_function<Gaussian3D,bool>
{
__host__ __device__ bool
operator()(const Gaussian3D g){
REAL u = g.mean[0] ;
REAL v = g.mean[1] ;
REAL d = g.mean[2] ;
bool in_fov = (u > 0) &&
(u <= dev_config.imageWidth) &&
(v >= 0) &&
(v <= dev_config.imageHeight) &&
(d >= 0);
return in_fov ;
}
};
__global__ void
fitGaussiansKernel(REAL* uArray, REAL* vArray, REAL* dArray,
REAL* weights,int nGaussians,
Gaussian3D* gaussians){
int tid = threadIdx.x ;
__shared__ REAL sdata[256] ;
for (int i = blockIdx.x ; i < nGaussians ; i+=gridDim.x){
int nParticles = dev_config.particlesPerFeature ;
int offset = i*nParticles ;
REAL val = 0 ;
// compute mean u
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += uArray[offset+j] ;
}
sumByReduction(sdata,val,tid);
REAL uMean = sdata[0]/nParticles ;
__syncthreads() ;
// compute mean v
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += vArray[offset+j] ;
}
sumByReduction(sdata,val,tid);
REAL vMean = sdata[0]/nParticles ;
__syncthreads() ;
// compute mean d
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += dArray[offset+j] ;
}
sumByReduction(sdata,val,tid);
REAL dMean = sdata[0]/nParticles ;
__syncthreads() ;
// write means to output
if (tid == 0){
// cuPrintf("%f %f %f\n",uMean,vMean,dMean) ;
gaussians[i].weight = weights[i] ;
gaussians[i].mean[0] = uMean ;
gaussians[i].mean[1] = vMean ;
gaussians[i].mean[2] = dMean ;
}
// covariance term u-u
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += pow(uArray[offset+j]-uMean,2) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0)
gaussians[i].cov[0] = sdata[0]/(nParticles-1) ;
__syncthreads() ;
// covariance term v-v
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += pow(vArray[offset+j]-vMean,2) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0)
gaussians[i].cov[4] = sdata[0]/(nParticles-1) ;
__syncthreads() ;
// covariance term d-d
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += pow(dArray[offset+j]-dMean,2) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0)
gaussians[i].cov[8] = sdata[0]/(nParticles-1) ;
__syncthreads() ;
// covariance term u-v
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += (uArray[offset+j]-uMean)*(vArray[offset+j]-vMean) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0){
gaussians[i].cov[1] = sdata[0]/(nParticles-1) ;
gaussians[i].cov[3] = gaussians[i].cov[1] ;
}
__syncthreads() ;
// covariance term u-d
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += (uArray[offset+j]-uMean)*(dArray[offset+j]-dMean) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0){
gaussians[i].cov[2] = sdata[0]/(nParticles-1) ;
gaussians[i].cov[6] = gaussians[i].cov[2] ;
}
__syncthreads() ;
// covariance term v-d
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += (vArray[offset+j]-vMean)*(dArray[offset+j]-dMean) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0){
gaussians[i].cov[5] = sdata[0]/(nParticles-1) ;
gaussians[i].cov[7] = gaussians[i].cov[5] ;
}
__syncthreads() ;
}
}
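// fitGaussiansKernel computes, per feature, the sample mean of its
// dev_config.particlesPerFeature particles and the unbiased sample covariance
// (normalized by nParticles-1), filling the symmetric 3x3 covariance in row-major
// order; e.g. the u-v entry is
//   cov[1] = cov[3] = sum_j (u_j - uMean)*(v_j - vMean) / (nParticles - 1)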
__global__ void
sampleGaussiansKernel(Gaussian3D* gaussians, int n_gaussians,
RngState* seeds,REAL* samples){
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
if (tid < dev_config.particlesPerFeature){
// initialize this thread's random number generator
RngState random_state = seeds[tid] ;
float x1,x2,x3,x_extra ;
float2 randnorms ;
bool odd_iteration = false ;
int idx_result = tid ;
int step = dev_config.particlesPerFeature*n_gaussians ;
// loop over gaussians
for (int n = 0 ; n < n_gaussians ; n++){
// cholesky decomposition of covariance matrix
REAL L11 = sqrt(gaussians[n].cov[0]) ;
REAL L21 = gaussians[n].cov[1]/L11 ;
REAL L22 = sqrt(gaussians[n].cov[4]-pow(L21,2)) ;
REAL L31 = gaussians[n].cov[2]/L11 ;
REAL L32 = (gaussians[n].cov[5]-L31*L21)/L22 ;
REAL L33 = sqrt(gaussians[n].cov[8] - pow(L31,2) - pow(L32,2)) ;
// generate uncorrelated normally distributed random values
randnorms = randn(random_state) ;
x1 = randnorms.x ;
x2 = randnorms.y ;
// the box-muller transform gives us normal variates two at a time,
// but we only need 3, so on even iterations, we call the transform
// twice and save the extra value to use in the next iteration.
if ( !odd_iteration ){
randnorms = randn(random_state) ;
x3 = randnorms.x ;
x_extra = randnorms.y ;
odd_iteration = true ;
}
else
{
x3 = x_extra ;
odd_iteration = false ;
}
// multiply uncorrelated values by cholesky decomposition and add
// mean
samples[idx_result] = x1*L11 + gaussians[n].mean[0] ;
samples[idx_result+step] = x1*L21 + x2*L22 + gaussians[n].mean[1] ;
samples[idx_result+2*step] = x1*L31 + x2*L32 + x3*L33 + gaussians[n].mean[2] ;
idx_result += dev_config.particlesPerFeature ;
}
}
}
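// sampleGaussiansKernel draws correlated samples via a manual Cholesky
// factorization of each 3x3 covariance, P = L*L', with
//   L11 = sqrt(P00), L21 = P01/L11, L22 = sqrt(P11 - L21^2),
//   L31 = P02/L11,   L32 = (P12 - L31*L21)/L22, L33 = sqrt(P22 - L31^2 - L32^2),
// and maps independent standard normals x ~ N(0,I) to mean + L*x.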
__global__ void
preUpdateDisparityKernel(Gaussian3D* features_predict,
REAL* features_pd,
int n_features,
ImageMeasurement* Z, int n_measure,
Gaussian3D* features_preupdate){
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
for ( int i = tid ; i < n_features ; i+=gridDim.x*blockDim.x){
Gaussian3D feature = features_predict[i] ;
REAL pd = features_pd[i] ;
// innovation covariance
REAL sigma[4] ;
REAL sigma_inv[4] ;
REAL varU = pow(dev_config.stdU,2) ;
REAL varV = pow(dev_config.stdV,2) ;
sigma[0] = feature.cov[0] + varU ;
sigma[1] = feature.cov[1] ;
sigma[2] = feature.cov[3] ;
sigma[3] = feature.cov[4] + varV ;
invert_matrix2(sigma,sigma_inv) ;
REAL det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
REAL K[6] ;
K[0] = feature.cov[0]*sigma_inv[0] + feature.cov[3]*sigma_inv[1] ;
K[1] = feature.cov[1]*sigma_inv[0] + feature.cov[4]*sigma_inv[1] ;
K[2] = feature.cov[2]*sigma_inv[0] + feature.cov[5]*sigma_inv[1] ;
K[3] = feature.cov[0]*sigma_inv[2] + feature.cov[3]*sigma_inv[3] ;
K[4] = feature.cov[1]*sigma_inv[2] + feature.cov[4]*sigma_inv[3] ;
K[5] = feature.cov[2]*sigma_inv[2] + feature.cov[5]*sigma_inv[3] ;
        // Maple-generated code for the Joseph-form update P = (I-K*H)*P*(I-K*H)' + K*R*K'
REAL cov_preupdate[9] ;
cov_preupdate[0] = (1 - K[0]) * (feature.cov[0] * (1 - K[0]) - feature.cov[3] * K[3]) - K[3] * (feature.cov[1] * (1 - K[0]) - feature.cov[4] * K[3]) + varU * pow( K[0], 2) + varV * pow( K[3], 2);
cov_preupdate[1] = -K[1] * (feature.cov[0] * (1 - K[0]) - feature.cov[3] * K[3]) + (1 - K[4]) * (feature.cov[1] * (1 - K[0]) - feature.cov[4] * K[3]) + K[0] * varU * K[1] + K[3] * varV * K[4];
cov_preupdate[2] = -K[2] * (feature.cov[0] * (1 - K[0]) - feature.cov[3] * K[3]) - K[5] * (feature.cov[1] * (1 - K[0]) - feature.cov[4] * K[3]) + feature.cov[2] * (1 - K[0]) - feature.cov[5] * K[3] + K[0] * varU * K[2] + K[3] * varV * K[5];
cov_preupdate[3] = (1 - K[0]) * (-feature.cov[0] * K[1] + feature.cov[3] * (1 - K[4])) - K[3] * (-feature.cov[1] * K[1] + feature.cov[4] * (1 - K[4])) + K[0] * varU * K[1] + K[3] * varV * K[4];
cov_preupdate[4] = -K[1] * (-feature.cov[0] * K[1] + feature.cov[3] * (1 - K[4])) + (1 - K[4]) * (-feature.cov[1] * K[1] + feature.cov[4] * (1 - K[4])) + varU * pow( K[1], 2) + varV * pow( K[4], 2);
cov_preupdate[5] = -K[2] * (-feature.cov[0] * K[1] + feature.cov[3] * (1 - K[4])) - K[5] * (-feature.cov[1] * K[1] + feature.cov[4] * (1 - K[4])) - feature.cov[2] * K[1] + feature.cov[5] * (1 - K[4]) + K[1] * varU * K[2] + K[4] * varV * K[5];
cov_preupdate[6] = (1 - K[0]) * (-feature.cov[0] * K[2] - feature.cov[3] * K[5] + feature.cov[6]) - K[3] * (-feature.cov[1] * K[2] - feature.cov[4] * K[5] + feature.cov[7]) + K[0] * varU * K[2] + K[3] * varV * K[5];
cov_preupdate[7] = -K[1] * (-feature.cov[0] * K[2] - feature.cov[3] * K[5] + feature.cov[6]) + (1 - K[4]) * (-feature.cov[1] * K[2] - feature.cov[4] * K[5] + feature.cov[7]) + K[1] * varU * K[2] + K[4] * varV * K[5];
cov_preupdate[8] = -K[2] * (-feature.cov[0] * K[2] - feature.cov[3] * K[5] + feature.cov[6]) - K[5] * (-feature.cov[1] * K[2] - feature.cov[4] * K[5] + feature.cov[7]) - feature.cov[2] * K[2] - feature.cov[5] * K[5] + feature.cov[8] + varU * pow( K[2], 2) + varV * pow( K[5], 2);
// end maple code
for ( int m = 0 ; m < n_measure ; m++){
int preupdate_idx = m*n_features + i ;
REAL innov[2] ;
innov[0] = Z[m].u - feature.mean[0] ;
innov[1] = Z[m].v - feature.mean[1] ;
REAL dist = innov[0]*innov[0]*sigma_inv[0] +
innov[0]*innov[1]*(sigma_inv[1]+sigma_inv[2]) +
innov[1]*innov[1]*sigma_inv[3] ;
REAL log_weight = safeLog(pd) + safeLog(feature.weight)
- 0.5*dist - safeLog(2*M_PI) - 0.5*safeLog(det_sigma) ;
features_preupdate[preupdate_idx].weight = log_weight ;
features_preupdate[preupdate_idx].mean[0] = feature.mean[0] +
innov[0]*K[0] + innov[1]*K[3] ;
features_preupdate[preupdate_idx].mean[1] = feature.mean[1] +
innov[0]*K[1] + innov[1]*K[4] ;
features_preupdate[preupdate_idx].mean[2] = feature.mean[2] +
innov[0]*K[2] + innov[1]*K[5] ;
for ( int n = 0 ; n < 9 ; n++ )
features_preupdate[preupdate_idx].cov[n] = cov_preupdate[n] ;
}
}
}
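// preUpdateDisparityKernel performs a Kalman pre-update in (u,v,d) space with
// measurement model H = [I2 0]: innovation covariance S = H*P*H' + diag(varU,varV),
// gain K = P*H'*inv(S), Joseph-form covariance (I-K*H)*P*(I-K*H)' + K*R*K' (the
// Maple-generated block), and per-measurement log-weight
//   log w = log(pd) + log(w_prior) - 0.5*innov'*inv(S)*innov - log(2*pi) - 0.5*log|S|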
/**
* @brief separateDisparityFeatures Separate features into in-range and out-of-range parts
* @param features_all[in] vector of disparity space gaussian features
* @param offsets_all[in] indexing offsets to \p features_all
* @param particles_all[in] vector of ParticleMaps corresponding to \p features_all
 * @param features_in[out] vector of in-range disparity space gaussian features
 * @param offsets_in[out] indexing offsets to \p features_in
* @param particles_out[out] vector of ParticleMaps containing the 3D particles
* for out-of-range features
*/
void separateDisparityFeatures(device_vector<Gaussian3D> features_all,
host_vector<int> offsets_all,
vector<ParticleMap> particles_all,
device_vector<Gaussian3D>& features_in,
host_vector<int>& offsets_in,
vector<ParticleMap>& particles_out)
{
// make sure the output arrays are of sufficient size
features_in.resize(features_all.size());
offsets_in.resize(offsets_all.size());
particles_out.resize(particles_all.size());
// initialize the out-of-range particles to be empty
for (int n = 0 ; n < particles_out.size() ; n++){
particles_out[n].weights.clear();
particles_out[n].x.clear();
particles_out[n].y.clear();
particles_out[n].z.clear();
}
// compute the in-range mask
device_vector<bool> dev_inrange_mask(features_all.size()) ;
DEBUG_MSG("transform") ;
thrust::transform(features_all.begin(),features_all.end(),
dev_inrange_mask.begin(),is_inrange()) ;
host_vector<bool> inrange_mask = dev_inrange_mask ;
// do the separation
DEBUG_MSG("copy_if") ;
thrust::copy_if(features_all.begin(),features_all.end(),
features_in.begin(),
is_inrange()) ;
if(config.debug){
for ( int i = 0 ; i < inrange_mask.size() ; i++){
std::cout << inrange_mask[i] << " " ;
if (i % 20 == 0 && i > 0)
std::cout << std::endl ;
}
std::cout << std::endl ;
}
// compute the separated offset arrays and copy out-of-range 3d particles
int map_idx = 0 ;
int feature_idx = 0 ;
int start_particles = 0 ;
int stop_particles = config.particlesPerFeature ;
int offset_total = 0 ;
DEBUG_MSG("compute offsets") ;
for ( int i = 0 ; i < inrange_mask.size() ; i++ ){
// check if we have crossed over to the next map
if( i >= offsets_all[map_idx+1] )
{
map_idx++ ;
offsets_in[map_idx] = offset_total ;
start_particles = 0 ;
stop_particles = config.particlesPerFeature ;
feature_idx = 0 ;
}
// DEBUG_VAL(start_particles) ;
// DEBUG_VAL(stop_particles) ;
// DEBUG_VAL(feature_idx) ;
// DEBUG_VAL(map_idx) ;
// DEBUG_VAL(inrange_mask[i]) ;
if (inrange_mask[i])
{
offset_total++ ;
}
else{
particles_out[map_idx].x.insert(particles_out[map_idx].x.end(),
&particles_all[map_idx].x[start_particles],
&particles_all[map_idx].x[stop_particles]) ;
particles_out[map_idx].y.insert(particles_out[map_idx].y.end(),
&particles_all[map_idx].y[start_particles],
&particles_all[map_idx].y[stop_particles]) ;
particles_out[map_idx].z.insert(particles_out[map_idx].z.end(),
&particles_all[map_idx].z[start_particles],
&particles_all[map_idx].z[stop_particles]) ;
particles_out[map_idx].weights.push_back(particles_all[map_idx].weights[feature_idx]);
}
start_particles += config.particlesPerFeature ;
stop_particles += config.particlesPerFeature ;
feature_idx++ ;
}
map_idx++ ;
offsets_in[map_idx] = offset_total ;
// shrink the output arrays to fit data
DEBUG_MSG("Shrink features_in") ;
DEBUG_VAL(offsets_in.back()) ;
features_in.resize(offsets_in.back());
DEBUG_MSG("shrink_to_fit") ;
features_in.shrink_to_fit();
}
/**
* @brief recombineFeatures Merge in-range and out-of-range features into a
* single feature vector
* @param features_in[in] vector of in-range features
* @param offsets_in[in] vector of indexing offsets for in-range features
* @param features_out[in] vector of out-of-range features
* @param offsets_out[in] vector of indexing offsets for out-of-range features
* @param features_all[out] vector where merged result will be written
* @param offsets_all[out] indexing offsets for merged features
*/
void recombineFeatures(device_vector<Gaussian3D> features_in,
host_vector<int> offsets_in,
device_vector<Gaussian3D> features_out,
host_vector<int> offsets_out,
                       device_vector<Gaussian3D>& features_all,
                       host_vector<int>& offsets_all){
// allocate space for outputs
features_all.resize(features_in.size()+features_out.size());
offsets_all.resize(offsets_in.size());
device_vector<Gaussian3D>::iterator it_result = features_all.begin() ;
// merge vectors map-by-map
offsets_all[0] = 0 ;
    for ( int n = 0 ; n < (int)offsets_in.size()-1 ; n++ ){
int start_in = offsets_in[n] ;
int stop_in = offsets_in[n+1] ;
it_result = thrust::copy(&features_in[start_in],
&features_in[stop_in],
it_result) ;
int start_out = offsets_out[n] ;
int stop_out = offsets_out[n+1] ;
it_result = thrust::copy(&features_out[start_out],
&features_out[stop_out],
it_result) ;
offsets_all[n+1] = stop_in + stop_out ;
}
}
void
disparityPredict(DisparitySLAM& slam){
DEBUG_MSG("Performing prediction") ;
host_vector<CameraState> states = slam.states ;
int n_states = states.size() ;
vector<REAL> noise_x(n_states) ;
vector<REAL> noise_y(n_states) ;
vector<REAL> noise_z(n_states) ;
vector<REAL> noise_roll(n_states) ;
vector<REAL> noise_pitch(n_states) ;
vector<REAL> noise_yaw(n_states) ;
for (int i = 0 ; i < n_states ; i++){
noise_x[i] = randn()*config.ax ;
noise_y[i] = randn()*config.ay ;
noise_z[i] = randn()*config.az ;
noise_roll[i] = randn()*config.aroll ;
noise_pitch[i] = randn()*config.apitch ;
noise_yaw[i] = randn()*config.ayaw ;
}
REAL dt = config.dt ;
for (int i = 0 ; i < n_states ; i++ ){
ConstantVelocityState3D pose = slam.states[i].pose ;
REAL dx = dt*pose.vx + 0.5*noise_x[i]*pow(dt,2) ;
REAL dy = dt*pose.vy + 0.5*noise_y[i]*pow(dt,2) ;
REAL dz = dt*pose.vz + 0.5*noise_z[i]*pow(dt,2) ;
REAL dx_world = 0 ;
REAL dy_world = 0 ;
REAL dz_world = 0 ;
transformCameraToWorld(dx,dy,dz,slam.states[i],
dx_world,dy_world,dz_world,false);
pose.px += dx_world ;
pose.py += dy_world ;
pose.pz += dz_world ;
pose.proll += dt*pose.vroll + 0.5*noise_roll[i]*pow(dt,2) ;
pose.ppitch += dt*pose.vpitch + 0.5*noise_pitch[i]*pow(dt,2) ;
pose.pyaw += dt*pose.vyaw + 0.5*noise_yaw[i]*pow(dt,2) ;
pose.vx += dt*noise_x[i] ;
pose.vy += dt*noise_y[i] ;
pose.vz += dt*noise_z[i] ;
pose.vroll += dt*noise_roll[i] ;
pose.vpitch += dt*noise_pitch[i] ;
pose.vyaw += dt*noise_yaw[i] ;
pose.proll = wrapAngle(pose.proll) ;
pose.ppitch = wrapAngle(pose.ppitch) ;
pose.pyaw = wrapAngle(pose.pyaw) ;
slam.states[i].pose = pose ;
}
}
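// disparityPredict propagates each camera pose with a constant-velocity model driven
// by zero-mean acceleration noise (scaled by config.ax/ay/az and aroll/apitch/ayaw):
//   p_{k+1} = p_k + v_k*dt + 0.5*a*dt^2,   v_{k+1} = v_k + a*dt
// with the translational increment rotated from the camera to the world frame before
// being applied, and the orientation angles wrapped by wrapAngle().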
void
disparityUpdate(DisparitySLAM& slam,
std::vector<ImageMeasurement> Z){
host_vector<ImageMeasurement> measurements = Z ;
// DEBUG_MSG("Received measurements: ") ;
// for ( int i = 0 ; i < Z.size() ; i++ ){
// cout << Z[i].u << "," << Z[i].v << endl ;
// }
// vector which contains the camera state to which each particle belongs
host_vector<int> camera_idx_vector ;
// vectors to contain concatenated particles
host_vector<REAL> x_vector ;
host_vector<REAL> y_vector ;
host_vector<REAL> z_vector ;
// vector to contain camera states
host_vector<CameraState> camera_vector = slam.states ;
if(config.debug){
DEBUG_MSG("Camera states: ") ;
for ( int n = 0 ; n < camera_vector.size() ; n++ ){
CameraState cam = camera_vector[n] ;
cout << n << " " << cam.pose.px << ","
<< cam.pose.py << ","
<< cam.pose.pz << ","
<< cam.pose.proll << ","
<< cam.pose.ppitch << ","
<< cam.pose.pyaw << endl ;
}
}
// vector of map sizes
host_vector<int> map_offsets(slam.n_particles+1) ;
map_offsets[0] = 0 ;
    // vector of feature weights
host_vector<REAL> feature_weights ;
for ( int n = 0 ; n < slam.n_particles ; n++ ) {
ParticleMap map_n = slam.maps[n] ;
int n_particles = map_n.x.size() ;
map_offsets[n+1] = map_offsets[n] + n_particles/config.particlesPerFeature ;
camera_idx_vector.insert(camera_idx_vector.end(),
n_particles,
n) ;
x_vector.insert(x_vector.end(),map_n.x.begin(),map_n.x.end()) ;
y_vector.insert(y_vector.end(),map_n.y.begin(),map_n.y.end()) ;
z_vector.insert(z_vector.end(),map_n.z.begin(),map_n.z.end()) ;
feature_weights.insert(feature_weights.end(),
map_n.weights.begin(),
map_n.weights.end()) ;
}
// create device vectors
int n_particles_total = x_vector.size() ;
int n_features_total = n_particles_total/config.particlesPerFeature ;
device_vector<CameraState> dev_camera_vector = camera_vector ;
device_vector<int> dev_camera_idx_vector = camera_idx_vector ;
device_vector<REAL> dev_x_vector = x_vector ;
device_vector<REAL> dev_y_vector = y_vector ;
device_vector<REAL> dev_z_vector = z_vector ;
device_vector<REAL> dev_u_vector(n_particles_total) ;
device_vector<REAL> dev_v_vector(n_particles_total) ;
device_vector<REAL> dev_d_vector(n_particles_total) ;
device_vector<REAL> dev_inrange_vector(n_particles_total) ;
// do the transformation
DEBUG_MSG("Performing world to disparity transformation") ;
thrust::for_each(make_zip_iterator(make_tuple(
dev_camera_idx_vector.begin(),
dev_x_vector.begin(),
dev_y_vector.begin(),
dev_z_vector.begin(),
dev_u_vector.begin(),
dev_v_vector.begin(),
dev_d_vector.begin(),
dev_inrange_vector.begin()
)),
make_zip_iterator(make_tuple(
dev_camera_idx_vector.end(),
dev_x_vector.end(),
dev_y_vector.end(),
dev_z_vector.end(),
dev_u_vector.end(),
dev_v_vector.end(),
dev_d_vector.end(),
dev_inrange_vector.end()
)),
world_to_disparity_transform(raw_pointer_cast(&dev_camera_vector[0]))) ;
// DEBUG_MSG("Disparity-transformed particles: ") ;
// DEBUG_MSG("First map: ") ;
// for( int j = 0 ; j < slam.maps[0].x.size() ; j++ ){
// cout << dev_u_vector[j] << ","
// << dev_v_vector[j] << ","
// << dev_d_vector[j] << endl ;
// }
// DEBUG_MSG("Second map: ") ;
// for( int j = 0 ; j < slam.maps[1].x.size() ; j++ ){
// cout << dev_u_vector[j+slam.maps[0].x.size()] << ","
// << dev_v_vector[j+slam.maps[0].x.size()] << ","
// << dev_d_vector[j+slam.maps[0].x.size()] << endl ;
// }
// // generate the keys for grouping particles into features
// host_vector<int> feature_keys ;
//
// DEBUG_VAL(n_features_total) ;
// for ( int n = 0 ; n < n_features_total ; n++ ){
// feature_keys.insert(feature_keys.end(),config.particlesPerFeature,n) ;
// }
// // compute pd for each gaussian feature
// DEBUG_MSG("Computing Pd") ;
// device_vector<int> dev_feature_keys = feature_keys ;
// device_vector<int> dev_keys_out(n_particles_total) ;
// device_vector<REAL> dev_pd(n_particles_total) ;
// // sum the in-range values of all particles per feature
// reduce_by_key(dev_feature_keys.begin(),dev_feature_keys.end(),
// dev_inrange_vector.begin(),
// dev_keys_out.begin(),dev_pd.begin()) ;
// // divide the sum by the number of particles per feature
// divide_by<REAL> division_op((REAL)config.particlesPerFeature) ;
// thrust::transform(dev_pd.begin(),dev_pd.end(),dev_pd.begin(),division_op) ;
// // multiply by nominal pd value
// multiply_by<REAL> multiply_op(config.pd) ;
// thrust::transform(dev_pd.begin(),dev_pd.end(),dev_pd.begin(),multiply_op) ;
// if(config.debug){
// DEBUG_MSG("Computed Pd for first particle:") ;
// for ( int j = 0 ; j < slam.maps[0].weights.size() ; j++ ){
// cout << dev_pd[j] << endl ;
// }
// }
// if (n_particles_total > 0 && config.debug){
// DEBUG_MSG("Verify disparity space particles: ");
// for (int j = 0 ; j < config.particlesPerFeature ; j++){
// cout << dev_u_vector[j] << "," << dev_v_vector[j] << "," << dev_d_vector[j] << endl ;
// }
// }
// DEBUG_MSG("Separate in-range and outside-range features") ;
// int k = 0 ;
// host_vector<REAL> pd_vector = dev_pd ;
// device_vector<REAL> dev_u_inrange ;
// host_vector<REAL> u_outrange ;
// device_vector<REAL> dev_v_inrange ;
// host_vector<REAL> v_outrange ;
// device_vector<REAL> dev_d_inrange ;
// host_vector<REAL> d_outrange ;
// host_vector<int> map_offsets_inrange(slam.n_particles+1,0) ;
// host_vector<int> map_offsets_outrange(slam.n_particles+1,0) ;
// int n_features_inrange = 0 ;
// DEBUG_MSG("particles...") ;
// for (int i = 0 ; i < slam.n_particles ; i++ ){
// int n_features = map_offsets[i+1]-map_offsets[i] ;
// map_offsets_inrange[i+1] = map_offsets_inrange[i] ;
// map_offsets_outrange[i+1] = map_offsets_outrange[i] ;
// for ( int j = 0 ; j < n_features ; j++ ){
// int offset_begin = k*config.particlesPerFeature ;
// int offset_end = offset_begin + config.particlesPerFeature ;
// if(pd_vector[k] > 0){
// dev_u_inrange.insert(dev_u_inrange.end(),
// dev_u_vector.begin()+offset_begin,
// dev_u_vector.begin()+offset_end) ;
// dev_v_inrange.insert(dev_v_inrange.end(),
// dev_v_vector.begin()+offset_begin,
// dev_v_vector.begin()+offset_end) ;
// dev_d_inrange.insert(dev_d_inrange.end(),
// dev_d_vector.begin()+offset_begin,
// dev_d_vector.begin()+offset_end) ;
// map_offsets_inrange[i+1]++ ;
// n_features_inrange++ ;
// }
// else{
// u_outrange.insert(u_outrange.end(),
// dev_u_vector.begin()+offset_begin,
// dev_u_vector.begin()+offset_end) ;
// v_outrange.insert(v_outrange.end(),
// dev_v_vector.begin()+offset_begin,
// dev_v_vector.begin()+offset_end) ;
// d_outrange.insert(d_outrange.end(),
// dev_d_vector.begin()+offset_begin,
// dev_d_vector.begin()+offset_end) ;
// map_offsets_outrange[i+1]++ ;
// }
// k++ ;
// }
// }
// DEBUG_MSG("weights...") ;
// host_vector<REAL> feature_weights_inrange(n_features_total) ;
// host_vector<REAL> feature_weights_outrange(n_features_total) ;
// host_vector<REAL> pd_inrange(n_features_total) ;
// DEBUG_MSG("copy inrange weights...") ;
// thrust::copy_if(feature_weights.begin(),
// feature_weights.end(),
// pd_vector.begin(),
// feature_weights_inrange.begin(),
// gt0()) ;
// DEBUG_MSG("copy outrange weights...") ;
// thrust::copy_if(feature_weights.begin(),
// feature_weights.end(),
// pd_vector.begin(),
// feature_weights_outrange.begin(),
// leq0()) ;
// DEBUG_MSG("copy pd in range") ;
// thrust::copy_if(pd_vector.begin(),
// pd_vector.end(),
// pd_inrange.begin(),
// gt0()) ;
// dev_pd = pd_inrange ;
// fit gaussians to particles
cudaPrintfInit() ;
DEBUG_MSG("Fitting gaussians to disparity space particles") ;
int n_blocks = min(65535,n_features_total) ;
device_vector<REAL> dev_feature_weights = feature_weights ;
device_vector<Gaussian3D> dev_gaussians(n_features_total) ;
hipLaunchKernelGGL(( fitGaussiansKernel), dim3(n_blocks),dim3(256), 0, 0,
raw_pointer_cast(&dev_u_vector[0]),
raw_pointer_cast(&dev_v_vector[0]),
raw_pointer_cast(&dev_d_vector[0]),
raw_pointer_cast(&dev_feature_weights[0]),
n_features_total,
raw_pointer_cast(&dev_gaussians[0]) ) ;
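    // Launch geometry: one thread block per feature (capped at 65535 blocks),
    // 256 threads per block. Each block presumably reduces its feature's
    // config.particlesPerFeature disparity-space samples into the weight, mean
    // and 3x3 covariance of a Gaussian3D; the per-block cooperation is an
    // assumption based on the launch configuration, not on the kernel source here.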
// cudaPrintfDisplay() ;
if(config.debug){
DEBUG_MSG("Fitted gaussians:") ;
for ( int n = 0 ; n < n_features_total ; n++ ){
Gaussian3D g = dev_gaussians[n] ;
print_feature(g) ;
}
}
// separate in range and out of range gaussians
DEBUG_MSG("Separating in-range features") ;
// for(int n = 0 ; n < map_offsets.size() ; n++)
// DEBUG_VAL(map_offsets[n]) ;
host_vector<int> map_offsets_in(slam.n_particles+1,0) ;
device_vector<Gaussian3D> dev_gaussians_in ;
vector<ParticleMap> particles_out = slam.maps ;
separateDisparityFeatures(dev_gaussians,map_offsets,slam.maps,
dev_gaussians_in,map_offsets_in,
particles_out);
int n_features_in = map_offsets_in.back() ;
// for(int n = 0 ; n < map_offsets_in.size() ; n++)
// DEBUG_VAL(map_offsets_in[n]) ;
device_vector<REAL> dev_pd(dev_gaussians_in.size(),config.pd) ;
// if(config.debug){
// DEBUG_MSG("in-range gaussians:") ;
// for ( int n = 0 ; n < n_features_in ; n++ ){
// Gaussian3D g = dev_gaussians_in[n] ;
// print_feature(g) ;
// }
// }
// if (config.debug){
// DEBUG_MSG("out-of-range particles:") ;
// particles_out[0].print() ;
// }
// generate the birth terms
DEBUG_MSG("Generating birth terms from measurements") ;
int n_measurements = measurements.size() ;
host_vector<Gaussian3D> gaussians_birth(n_measurements*slam.n_particles) ;
for ( int m = 0 ; m < n_measurements*slam.n_particles ; m++ ){
if ( m < n_measurements ){
gaussians_birth[m].weight = safeLog(config.birthWeight) ;
gaussians_birth[m].mean[0] = measurements[m].u ;
gaussians_birth[m].mean[1] = measurements[m].v ;
gaussians_birth[m].mean[2] = config.disparityBirth ;
gaussians_birth[m].cov[0] = pow(config.stdU,2) ;
gaussians_birth[m].cov[4] = pow(config.stdV,2) ;
gaussians_birth[m].cov[8] = pow(config.stdDBirth,2) ;
gaussians_birth[m].cov[1] = 0 ;
gaussians_birth[m].cov[2] = 0 ;
gaussians_birth[m].cov[3] = 0 ;
gaussians_birth[m].cov[5] = 0 ;
gaussians_birth[m].cov[6] = 0 ;
gaussians_birth[m].cov[7] = 0 ;
}
else
{
int idx = m % n_measurements ;
copy_gaussians(gaussians_birth[idx],gaussians_birth[m]) ;
}
// print_feature(gaussians_birth[m]) ;
}
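    // Birth term layout: Gaussian3D.cov is a flat 3x3 array, so indices 0, 4
    // and 8 hold the diagonal (u, v and disparity variances) while the
    // remaining entries are zeroed, i.e. births start as axis-aligned Gaussians
    // centred on the measurement at the nominal birth disparity. Only the first
    // n_measurements entries are computed; the else branch replicates that set
    // once per particle, so every particle receives an identical birth mixture.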
DEBUG_MSG("copy births to device") ;
device_vector<Gaussian3D> dev_gaussians_birth = gaussians_birth ;
DEBUG_VAL(n_measurements) ;
if (config.debug){
for ( int i = 0 ; i < measurements.size() ; i++){
std::cout << measurements[i].u << "," << measurements[i].v << std::endl ;
}
}
device_vector<ImageMeasurement> dev_measurements(n_measurements) ;
DEBUG_MSG("copy measurements to device") ;
dev_measurements = measurements ;
// do the preupdate
DEBUG_MSG("allocate preupdate terms") ;
device_vector<Gaussian3D> dev_gaussians_preupdate(n_features_in*n_measurements) ;
if (dev_gaussians_preupdate.size() > 0){
DEBUG_MSG("Computing disparity pre-update") ;
n_blocks = min(65535,n_features_in) ;
hipLaunchKernelGGL(( preUpdateDisparityKernel), dim3(n_blocks),dim3(256), 0, 0,
raw_pointer_cast(&dev_gaussians_in[0]),
raw_pointer_cast(&dev_pd[0]),
n_features_in,
raw_pointer_cast(&dev_measurements[0]),
n_measurements,
raw_pointer_cast(&dev_gaussians_preupdate[0]));
if (config.debug){
DEBUG_MSG("pre-update terms:") ;
for(int j = 0 ; j < n_features_in*n_measurements ; j++ ){
Gaussian3D g= dev_gaussians_preupdate[j] ;
print_feature(g) ;
}
}
}
// do the sc-phd update
DEBUG_VAL(config.birthWeight) ;
DEBUG_VAL(config.clutterDensity) ;
DEBUG_MSG("allocate particle weights") ;
device_vector<REAL> dev_weights(slam.n_particles) ;
DEBUG_MSG("copy map offsets to device") ;
device_vector<int> dev_map_offsets = map_offsets_in ;
int n_update = n_features_in*(n_measurements+1) +
slam.n_particles*n_measurements ;
DEBUG_VAL(n_update) ;
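    // Updated mixture size: every in-range feature produces one non-detection
    // term plus one term per measurement, and each particle additionally gets
    // n_measurements birth terms. For example, 100 in-range features, 10
    // measurements and 50 particles give 100*(10+1) + 50*10 = 1600 Gaussians.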
DEBUG_MSG("allocate device memory for updated gaussians") ;
device_vector<Gaussian3D> dev_gaussians_update(n_update) ;
DEBUG_MSG("allocate device memory for merging flags") ;
device_vector<bool> dev_merge_flags(n_update) ;
n_blocks = min(slam.n_particles,65535) ;
// for ( int n = 0 ; n < slam.n_particles ; n++){
// int x = dev_map_offsets[n] ;
// DEBUG_VAL(x) ;
// }
DEBUG_MSG("Performing SC-PHD update") ;
hipLaunchKernelGGL(( phdUpdateKernel), dim3(n_blocks),dim3(256), 0, 0,
raw_pointer_cast(&dev_gaussians_in[0]),
raw_pointer_cast(&dev_pd[0]),
raw_pointer_cast(&dev_gaussians_preupdate[0]),
raw_pointer_cast(&dev_gaussians_birth[0]),
raw_pointer_cast(&dev_map_offsets[0]),
slam.n_particles,n_measurements,
raw_pointer_cast(&dev_gaussians_update[0]),
raw_pointer_cast(&dev_merge_flags[0]),
raw_pointer_cast(&dev_weights[0])) ;
CUDA_SAFE_THREAD_SYNC() ;
// cudaPrintfDisplay() ;
cudaPrintfEnd();
// manually free some device memory
dev_gaussians_birth.resize(0);
dev_gaussians_birth.shrink_to_fit();
dev_gaussians.resize(0);
dev_gaussians.shrink_to_fit();
dev_gaussians_preupdate.resize(0);
dev_gaussians_preupdate.shrink_to_fit();
if(config.debug){
DEBUG_MSG("Updated gaussians and merge flags: ") ;
for (int n = 0 ; n < n_update ; n++){
bool flag = dev_merge_flags[n] ;
cout << flag << " " ;
Gaussian3D g = dev_gaussians_update[n] ;
print_feature(g) ;
}
}
// do the GM-merging
device_vector<int> dev_merged_sizes(slam.n_particles) ;
device_vector<Gaussian3D> dev_gaussians_merged_tmp(n_update) ;
// recalculate offsets for updated map size
for ( int n = 0 ; n < (slam.n_particles+1) ; n++ ){
map_offsets_in[n] *= (n_measurements+1) ;
map_offsets_in[n] += n_measurements*n ;
// DEBUG_VAL(map_offsets[n]) ;
}
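    // After the update, particle n owns
    // map_offsets_in[n+1]-map_offsets_in[n] = n_features_n*(n_measurements+1) + n_measurements
    // Gaussians, so the prior offsets are scaled by (n_measurements+1) and
    // shifted by n_measurements*n to index into dev_gaussians_update and the
    // merge output.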
dev_map_offsets = map_offsets_in ;
DEBUG_MSG("Performing GM reduction") ;
hipLaunchKernelGGL(( phdUpdateMergeKernel), dim3(n_blocks),dim3(256), 0, 0,
raw_pointer_cast(&dev_gaussians_update[0]),
raw_pointer_cast(&dev_gaussians_merged_tmp[0]),
raw_pointer_cast(&dev_merged_sizes[0]),
raw_pointer_cast(&dev_merge_flags[0]),
raw_pointer_cast(&dev_map_offsets[0]),
slam.n_particles) ;
CUDA_SAFE_THREAD_SYNC() ;
// copy out the results of the GM reduction, leaving only valid gaussians
host_vector<int> merged_sizes = dev_merged_sizes ;
int n_merged_total = thrust::reduce(merged_sizes.begin(),
merged_sizes.end()) ;
device_vector<Gaussian3D> dev_gaussians_merged(n_merged_total) ;
device_vector<Gaussian3D>::iterator it = dev_gaussians_merged.begin() ;
for ( int n = 0 ; n < merged_sizes.size() ; n++){
it = thrust::copy_n(&dev_gaussians_merged_tmp[map_offsets_in[n]],
merged_sizes[n],
it) ;
}
// get the updated feature weights
device_vector<REAL> dev_merged_weights(n_merged_total) ;
get_weight<Gaussian3D> op ;
thrust::transform(dev_gaussians_merged.begin(),
dev_gaussians_merged.end(),
dev_merged_weights.begin(),
op) ;
host_vector<REAL> merged_weights = dev_merged_weights ;
if (config.debug)
{
DEBUG_MSG("merged feature weights: ") ;
for( int n = 0 ; n < merged_weights.size() ; n++){
cout << merged_weights[n] << endl ;
}
}
// initialize seeds for device-side random number generators
host_vector<RngState> seeds(config.particlesPerFeature) ;
for ( int n = 0 ; n < config.particlesPerFeature ; n++ ){
seeds[n].z1 = static_cast<unsigned>(randu01()*127 + 129) ;
seeds[n].z2 = static_cast<unsigned>(randu01()*127 + 129) ;
seeds[n].z3 = static_cast<unsigned>(randu01()*127 + 129) ;
seeds[n].z4 = static_cast<unsigned>(randu01()*256) ;
}
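    // One RNG state per thread of the sampling kernel
    // (config.particlesPerFeature threads). randu01()*127 + 129 keeps z1-z3 in
    // [129,255], presumably to stay above the minimum seed values required by
    // the combined Tausworthe components of RngState; that rationale is an
    // assumption based on the seeding pattern, not on the generator's source.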
device_vector<RngState> dev_seeds = seeds ;
// DEBUG_MSG("seeds: ") ;
// for (int n = 0 ; n < seeds.size() ; n++){
// cout << "[" << seeds[n].z1 << "," << seeds[n].z2 << ","
// << seeds[n].z3 << "," << seeds[n].z4 << "]" << endl ;
// }
// generate samples from merged gaussians
DEBUG_MSG("Sampling merged gaussians") ;
int n_particles_merged = n_merged_total*config.particlesPerFeature ;
device_vector<REAL> dev_samples(3*n_particles_merged) ;
n_blocks = ceil(config.particlesPerFeature/256.0) ;
DEBUG_VAL(n_blocks) ;
hipLaunchKernelGGL(( sampleGaussiansKernel), dim3(n_blocks),dim3(256), 0, 0,
raw_pointer_cast(&dev_gaussians_merged[0]),
n_merged_total,
raw_pointer_cast(&dev_seeds[0]),
raw_pointer_cast(&dev_samples[0]));
if(config.debug){
DEBUG_MSG("Verify Gaussian sampling:") ;
Gaussian3D g = dev_gaussians_merged[0] ;
print_feature(g) ;
for(int j = 0 ; j < config.particlesPerFeature ; j++){
cout << dev_samples[j] << ","
<< dev_samples[j+n_particles_merged] << ","
<< dev_samples[j+2*n_particles_merged] << endl ;
}
}
// split samples into individual components
dev_u_vector.resize(n_particles_merged);
dev_v_vector.resize(n_particles_merged);
dev_d_vector.resize(n_particles_merged);
thrust::copy_n(dev_samples.begin(),
n_particles_merged,dev_u_vector.begin()) ;
thrust::copy_n(dev_samples.begin()+n_particles_merged,
n_particles_merged,dev_v_vector.begin()) ;
thrust::copy_n(dev_samples.begin()+2*n_particles_merged,
n_particles_merged,dev_d_vector.begin()) ;
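    // dev_samples is laid out component-major: the first n_particles_merged
    // entries are u samples, the next n_particles_merged are v, and the final
    // block is disparity d (matching the debug print above), so three strided
    // copy_n calls split it back into the per-component vectors.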
// prepare the camera index vector for transforming the particles
// and save gaussian weights
camera_idx_vector.clear();
int offset = 0 ;
for ( int n = 0 ; n < slam.n_particles ; n++ ){
int n_merged = merged_sizes[n] ;
camera_idx_vector.insert(camera_idx_vector.end(),
n_merged*config.particlesPerFeature, n) ;
slam.maps[n].weights.assign(&merged_weights[offset],
&merged_weights[offset+n_merged]);
offset += n_merged ;
}
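    // Particle n contributes merged_sizes[n]*config.particlesPerFeature samples,
    // so camera_idx_vector repeats n that many times; the disparity-to-world
    // transform below uses it to look up the parent particle's camera pose.
    // The merged Gaussian weights are sliced back into each particle's map
    // using the same per-particle offsets.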
dev_camera_idx_vector = camera_idx_vector ;
// // copy merged features to host, and sample disparity particles
// DEBUG_MSG("Sampling disparity space particles") ;
// host_vector<REAL> u_vector ;
// host_vector<REAL> v_vector ;
// host_vector<REAL> d_vector ;
// host_vector<Gaussian3D> gaussians_merged = dev_gaussians_merged ;
// camera_idx_vector.clear();
// for ( int n = 0 ; n < slam.n_particles ; n++ ){
// int offset = map_offsets_in[n] ;
// int n_merged = merged_sizes[n] ;
// if(config.debug)
// DEBUG_VAL(n_merged) ;
// host_vector<REAL> weights(0) ;
// camera_idx_vector.insert(camera_idx_vector.end(),
// n_merged*config.particlesPerFeature, n) ;
// for ( int i = 0 ; i < n_merged ; i++ ){
// Gaussian3D g = gaussians_merged[offset+i] ;
//// if(config.debug)
//// print_feature(g) ;
// vector<REAL> samples(config.particlesPerFeature*3) ;
// randmvn3(g.mean,g.cov,config.particlesPerFeature,&samples[0]);
// REAL* u_ptr = &samples[0] ;
// REAL* v_ptr = u_ptr+config.particlesPerFeature ;
// REAL* d_ptr = v_ptr+config.particlesPerFeature ;
// u_vector.insert(u_vector.end(),
// u_ptr, u_ptr+config.particlesPerFeature) ;
// v_vector.insert(v_vector.end(),
// v_ptr, v_ptr+config.particlesPerFeature) ;
// d_vector.insert(d_vector.end(),
// d_ptr, d_ptr+config.particlesPerFeature) ;
// // save the gaussian weight now
// weights.push_back(g.weight);
// }
// slam.maps[n].weights.assign(weights.begin(),weights.end()) ;
// }
// copy disparity particles to device
// n_particles_total = u_vector.size() ;
// dev_u_vector = u_vector ;
// dev_v_vector = v_vector ;
// dev_d_vector = d_vector ;
dev_x_vector.resize(n_particles_merged);
dev_y_vector.resize(n_particles_merged);
dev_z_vector.resize(n_particles_merged);
// for (int n = 0 ; n < u_vector.size() ; n++ )
// DEBUG_VAL(u_vector[n]) ;
// do the transformation
DEBUG_MSG("Computing disparity to world transformation") ;
thrust::for_each(make_zip_iterator(make_tuple(
dev_camera_idx_vector.begin(),
dev_u_vector.begin(),
dev_v_vector.begin(),
dev_d_vector.begin(),
dev_x_vector.begin(),
dev_y_vector.begin(),
dev_z_vector.begin()
)),
make_zip_iterator(make_tuple(
dev_camera_idx_vector.end(),
dev_u_vector.end(),
dev_v_vector.end(),
dev_d_vector.end(),
dev_x_vector.end(),
dev_y_vector.end(),
dev_z_vector.end()
)),
disparity_to_world_transform(raw_pointer_cast(&dev_camera_vector[0]))) ;
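    // thrust::for_each over a zip_iterator applies the functor element-wise to
    // the tuple (camera index, u, v, d, x, y, z): it presumably reads the pose
    // from dev_camera_vector[camera index], back-projects the (u,v,d) sample to
    // Euclidean coordinates, and writes the result into the x/y/z slots in place.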
// save euclidean particles
DEBUG_MSG("Saving updated 3D particles") ;
x_vector = dev_x_vector ;
y_vector = dev_y_vector ;
z_vector = dev_z_vector ;
host_vector<REAL> weights = dev_weights ;
if(config.debug){
DEBUG_MSG("Verify disparity to euclidean transformation") ;
for( int j = 0 ; j < config.particlesPerFeature ; j++ ){
cout << x_vector[j] << "," << y_vector[j] << "," << z_vector[j] << endl ;
}
}
offset = 0 ;
for ( int n = 0 ; n < slam.n_particles ; n++ ){
// DEBUG_VAL(slam.weights[n]) ;
int n_particles = merged_sizes[n]*config.particlesPerFeature ;
slam.maps[n].x.assign(x_vector.begin()+offset,
x_vector.begin()+offset+n_particles) ;
slam.maps[n].y.assign(y_vector.begin()+offset,
y_vector.begin()+offset+n_particles) ;
slam.maps[n].z.assign(z_vector.begin()+offset,
z_vector.begin()+offset+n_particles) ;
offset += n_particles ;
// recombine with out of range particles
slam.maps[n].weights.insert(slam.maps[n].weights.end(),
particles_out[n].weights.begin(),
particles_out[n].weights.end()) ;
slam.maps[n].x.insert(slam.maps[n].x.end(),
particles_out[n].x.begin(),
particles_out[n].x.end()) ;
slam.maps[n].y.insert(slam.maps[n].y.end(),
particles_out[n].y.begin(),
particles_out[n].y.end()) ;
slam.maps[n].z.insert(slam.maps[n].z.end(),
particles_out[n].z.begin(),
particles_out[n].z.end()) ;
// update parent particle weights
slam.weights[n] += weights[n] ;
if (config.debug)
DEBUG_VAL(slam.weights[n]) ;
}
// if (config.debug){
// DEBUG_MSG("Updated map particles: ") ;
// for ( int n = 0 ; n < slam.n_particles ; n++ ){
// DEBUG_VAL(n) ;
// slam.maps[n].print() ;
// }
// }
// normalize particle weights
DEBUG_MSG("normalize weights") ;
REAL log_weight_sum = logSumExp(slam.weights) ;
DEBUG_VAL(log_weight_sum) ;
for(int n = 0 ; n < slam.n_particles ; n++ ){
slam.weights[n] -= log_weight_sum ;
if(config.debug)
DEBUG_VAL(slam.weights[n]) ;
}
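    // slam.weights are log-weights, so normalization subtracts
    // log(sum_n exp(w_n)); logSumExp presumably evaluates this stably via the
    // identity log(sum exp(w_n)) = m + log(sum exp(w_n - m)) with m = max_n w_n,
    // leaving the particle weights summing to one in linear space.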
}
|
0b930feb27b077813c3ae259a08ed6bfca369dc5.cu
|
/*
* main.cpp
*
* Created on: Mar 24, 2011
* Author: cheesinglee
*/
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <algorithm>
#include <sstream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <string>
#include <cstdarg>
#include "slamtypes.h"
//#include "slamparams.h"
#include <cutil.h>
//#include <complex.h>
//#include <fftw3.h>
#include <assert.h>
#include <float.h>
#include "cuPrintf.cu"
#include "device_math.cuh"
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/remove.h>
#include <thrust/copy.h>
#include <thrust/partition.h>
#include <thrust/scan.h>
#include <thrust/iterator/zip_iterator.h>
//#include "ConstantVelocity2DKinematicModel.cu"
// include gcc-compiled boost rng
#include "rng.h"
#ifdef __CDT_PARSER__
#define __device__
#define __global__
#define __constant__
#define __shared__
#define __host__
#endif
#define DEBUG
#ifdef DEBUG
#define DEBUG_MSG(x) cout << "[" << __func__ << "(" << __LINE__ << ")]: " << x << endl
#define DEBUG_VAL(x) cout << "[" << __func__ << "(" << __LINE__ << ")]: " << #x << " = " << x << endl
#else
#define DEBUG_MSG(x)
#define DEBUG_VAL(x)
#endif
//--- Make kernel helper functions externally visible
void
initCphdConstants() ;
void
predictMap(SynthSLAM& p) ;
void
phdPredict(SynthSLAM& particles, ... ) ;
template<class GaussianType>
void
phdPredictVp( SynthSLAM& particles ) ;
SynthSLAM
phdUpdate(SynthSLAM& particles, measurementSet measurements) ;
template <typename T>
T resampleParticles(T oldParticles, int n_particles=-1 ) ;
void
recoverSlamState(SynthSLAM& particles, ConstantVelocityState& expectedPose,
vector<REAL>& cn_estimate ) ;
void
recoverSlamState(DisparitySLAM& particles, ConstantVelocityState& expectedPose ) ;
void
setDeviceConfig( const SlamConfig& config ) ;
//--- End external declarations
//template<class GaussianType>
//__host__ __device__ REAL
//wrapAngle(REAL a) ;
//--- End external function declaration
// SLAM configuration, externally declared
extern SlamConfig config ;
// device memory limit, externally declared
extern size_t deviceMemLimit ;
// dynamic shared memory
extern __shared__ REAL shmem[] ;
using namespace std ;
using namespace thrust ;
// Constant memory variables
__device__ __constant__ RangeBearingMeasurement Z[256] ;
__device__ __constant__ SlamConfig dev_config ;
// other global device variables
REAL* dev_C ;
REAL* dev_factorial ;
REAL* log_factorials ;
//__device__ REAL* dev_qspower ;
//__device__ REAL* dev_pspower ;
REAL* dev_cn_clutter ;
//ConstantVelocityModelProps modelProps = {STDX, STDY,STDTHETA} ;
//ConstantVelocity2DKinematicModel motionModel(modelProps) ;
/// helper function for outputting a Gaussian to std_out
template<class GaussianType>
__host__ void
print_feature(GaussianType f)
{
int dims = getGaussianDim(f) ;
//#if defined(__CUDA_ARCH__)
//#warning __CUDA_ARCH__ is defined
// cuPrintf("%f ",f.weight) ;
// for ( int i = 0 ; i < dims ; i++ )
// cuPrintf("%f ",f.mean[i]) ;
// for ( int i = 0 ; i < dims*dims ; i++ )
// cuPrintf("%f ",f.cov[i]) ;
// cuPrintf("\n") ;
//#else
//#warning __CUDA_ARCH__ is not defined
cout << f.weight << " " ;
for ( int i = 0 ; i < dims ; i++ )
cout << f.mean[i] << " " ;
for ( int i = 0 ; i < dims*dims ; i++)
cout << f.cov[i] << " " ;
cout << endl ;
//#endif
}
/// combine all features from all particles into a single STL vector
template<class GaussianType>
vector<GaussianType> combineFeatures(vector<vector <GaussianType> > maps, ...)
{
vector<GaussianType> concat ;
for ( unsigned int n = 0 ; n < maps.size() ; n++ )
concat.insert( concat.end(), maps[n].begin(), maps[n].end()) ;
return concat ;
}
/// return the next highest power of two
int nextPowerOfTwo(int a)
{
int n = a - 1 ;
n = n | (n >> 1) ;
n = n | (n >> 2) ;
n = n | (n >> 4) ;
n = n | (n >> 8);
n = n | (n >> 16) ;
n = n + 1 ;
return n ;
}
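// Worked example: for a = 5, n starts at 4 (100b); the shifted ORs smear the
// leading one into all lower bits giving 7 (111b), and adding 1 yields 8.
// Values that are already powers of two are returned unchanged, e.g. 16 -> 16.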
__device__ void
computeBirth( ConstantVelocityState pose, RangeBearingMeasurement z,
Gaussian2D& feature_birth)
{
// invert measurement
REAL theta = pose.ptheta + z.bearing ;
REAL dx = z.range*cos(theta) ;
REAL dy = z.range*sin(theta) ;
feature_birth.mean[0] = pose.px + dx ;
feature_birth.mean[1] = pose.py + dy ;
// inverse measurement jacobian
REAL J[4] ;
J[0] = dx/z.range ;
J[1] = dy/z.range ;
J[2] = -dy ;
J[3] = dx ;
// measurement noise
REAL var_range = pow(dev_config.stdRange*dev_config.birthNoiseFactor,2) ;
REAL var_bearing = pow(dev_config.stdBearing*dev_config.birthNoiseFactor,2) ;
// compute birth covariance
feature_birth.cov[0] = pow(J[0],2)*var_range +
pow(J[2],2)*var_bearing ;
feature_birth.cov[1] = J[0]*J[1]*var_range +
J[2]*J[3]*var_bearing ;
feature_birth.cov[2] =
feature_birth.cov[1] ;
feature_birth.cov[3] = pow(J[1],2)*var_range +
pow(J[3],2)*var_bearing ;
// set birth weight
if(z.label==STATIC_MEASUREMENT || !dev_config.labeledMeasurements)
feature_birth.weight = safeLog(dev_config.birthWeight) ;
else
feature_birth.weight = safeLog(0) ;
}
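// The 2x2 birth covariance above is G*R*G', where G is the Jacobian of the
// inverse measurement (range,bearing) -> (x,y) stored in J, and
// R = diag(var_range, var_bearing) is the measurement noise inflated by
// birthNoiseFactor, so birth uncertainty grows with range and the noise factor.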
__device__ void
computeBirth( ConstantVelocityState pose, RangeBearingMeasurement z,
Gaussian4D& feature_birth)
{
// invert measurement
REAL theta = pose.ptheta + z.bearing ;
REAL dx = z.range*cos(theta) ;
REAL dy = z.range*sin(theta) ;
feature_birth.mean[0] = pose.px + dx ;
feature_birth.mean[1] = pose.py + dy ;
// inverse measurement jacobian
REAL J[4] ;
J[0] = dx/z.range ;
J[1] = dy/z.range ;
J[2] = -dy ;
J[3] = dx ;
// measurement noise
REAL var_range = pow(dev_config.stdRange*dev_config.birthNoiseFactor,2) ;
REAL var_bearing = pow(dev_config.stdBearing*dev_config.birthNoiseFactor,2) ;
// mean birth velocity is zero
feature_birth.mean[2] = 0 ;
feature_birth.mean[3] = 0 ;
// upper 2x2 block of covariance matrix = K*R*K'
feature_birth.cov[0] = pow(J[0],2)*var_range +
pow(J[2],2)*var_bearing ;
feature_birth.cov[1] = J[0]*J[1]*var_range +
J[2]*J[3]*var_bearing ;
feature_birth.cov[4] =
feature_birth.cov[1] ;
feature_birth.cov[5] = pow(J[1],2)*var_range +
pow(J[3],2)*var_bearing ;
// lower 2 diagonal terms set to parameter value
feature_birth.cov[10] = dev_config.covVxBirth ;
feature_birth.cov[15] = dev_config.covVyBirth ;
// everything else set to 0
feature_birth.cov[2] = 0 ;
feature_birth.cov[3] = 0 ;
feature_birth.cov[6] = 0 ;
feature_birth.cov[7] = 0 ;
feature_birth.cov[8] = 0 ;
feature_birth.cov[9] = 0 ;
feature_birth.cov[11] = 0 ;
feature_birth.cov[12] = 0 ;
feature_birth.cov[13] = 0 ;
feature_birth.cov[14] = 0 ;
// set birth weight
if (z.label == DYNAMIC_MEASUREMENT || !dev_config.labeledMeasurements)
feature_birth.weight = safeLog(dev_config.birthWeight) ;
else
feature_birth.weight = safeLog(0) ;
}
__device__ void
computePreUpdate( ConstantVelocityState pose, Gaussian2D feature_predict,
int n_features, int n_measure, REAL& feature_pd,
Gaussian2D& feature_nondetect,
Gaussian2D*& features_update)
{
// predicted measurement
REAL dx = feature_predict.mean[0] - pose.px ;
REAL dy = feature_predict.mean[1] - pose.py ;
REAL r2 = dx*dx + dy*dy ;
REAL r = sqrt(r2) ;
REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// probability of detection
feature_pd = 0 ;
if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
feature_pd = dev_config.pd ;
// write non-detection term
copy_gaussians(feature_predict,feature_nondetect) ;
feature_nondetect.weight = feature_predict.weight*(1-feature_pd) ;
// measurement jacobian wrt feature
REAL J[4] ;
J[0] = dx/r ;
J[2] = dy/r ;
J[1] = -dy/r2 ;
J[3] = dx/r2 ;
REAL* P = feature_predict.cov ;
// BEGIN Maple-Generated expressions
// innovation covariance
REAL sigma[4] ;
sigma[0] = (P[0] * J[0] + J[2] * P[1]) * J[0] + (J[0] * P[2] + P[3] * J[2]) * J[2] + pow(dev_config.stdRange,2) ;
sigma[1] = (P[0] * J[1] + J[3] * P[1]) * J[0] + (J[1] * P[2] + P[3] * J[3]) * J[2];
sigma[2] = (P[0] * J[0] + J[2] * P[1]) * J[1] + (J[0] * P[2] + P[3] * J[2]) * J[3];
sigma[3] = (P[0] * J[1] + J[3] * P[1]) * J[1] + (J[1] * P[2] + P[3] * J[3]) * J[3] + pow(dev_config.stdBearing,2) ;
// enforce symmetry
sigma[1] = (sigma[1]+sigma[2])/2 ;
sigma[2] = sigma[1] ;
REAL det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
// inverse of sigma
REAL S[4] ;
S[0] = sigma[3]/(det_sigma) ;
S[1] = -sigma[1]/(det_sigma) ;
S[2] = -sigma[2]/(det_sigma) ;
S[3] = sigma[0]/(det_sigma) ;
// Kalman gain
REAL K[4] ;
K[0] = S[0]*(P[0]*J[0] + P[2]*J[2]) + S[1]*(P[0]*J[1] + P[2]*J[3]) ;
K[1] = S[0]*(P[1]*J[0] + P[3]*J[2]) + S[1]*(P[1]*J[1] + P[3]*J[3]) ;
K[2] = S[2]*(P[0]*J[0] + P[2]*J[2]) + S[3]*(P[0]*J[1] + P[2]*J[3]) ;
K[3] = S[2]*(P[1]*J[0] + P[3]*J[2]) + S[3]*(P[1]*J[1] + P[3]*J[3]) ;
REAL cov_update[4] ;
cov_update[0] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + pow(K[0], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[2], 2) * dev_config.stdBearing*dev_config.stdBearing;
cov_update[2] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
cov_update[1] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
cov_update[3] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + pow(K[1], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[3], 2) * dev_config.stdBearing*dev_config.stdBearing;
REAL innov[2] ;
REAL dist = 0 ;
for ( int m = 0 ; m < n_measure ; m++ )
{
int idx = m*n_features ;
innov[0] = Z[m].range - r ;
innov[1] = wrapAngle(Z[m].bearing - bearing) ;
features_update[idx].mean[0] = feature_predict.mean[0] + K[0]*innov[0] + K[2]*innov[1] ;
features_update[idx].mean[1] = feature_predict.mean[1] + K[1]*innov[0] + K[3]*innov[1] ;
for ( int i = 0 ; i < 4 ; i++ )
features_update[idx].cov[i] = cov_update[i] ;
// compute single object likelihood
dist = innov[0]*innov[0]*S[0] +
innov[0]*innov[1]*(S[1] + S[2]) +
innov[1]*innov[1]*S[3] ;
if(Z[m].label==STATIC_MEASUREMENT || !dev_config.labeledMeasurements)
{
// partially update weight (log-transformed)
features_update[idx].weight = safeLog(feature_pd)
+ safeLog(feature_predict.weight)
- 0.5*dist
- safeLog(2*M_PI)
- 0.5*safeLog(det_sigma) ;
}
else
{
features_update[idx].weight = safeLog(0) ;
}
}
}
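// For one predicted feature this writes the non-detection term plus one
// partially-updated Gaussian per measurement at stride n_features
// (idx = m*n_features), so the caller's pre-update array is measurement-major.
// The weight is the log of pd * w * N(z; z_hat, Sigma) for a 2-D measurement:
// -0.5*dist is the Mahalanobis term nu'*inv(Sigma)*nu, and
// -log(2*pi) - 0.5*log(det_sigma) is the Gaussian normalizing constant.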
__device__ void
computePreUpdate( ConstantVelocityState pose, Gaussian4D feature_predict,
int n_features, int n_measure, REAL& feature_pd,
Gaussian4D& feature_nondetect,
Gaussian4D*& features_update)
{
// predicted measurement
REAL dx = feature_predict.mean[0] - pose.px ;
REAL dy = feature_predict.mean[1] - pose.py ;
REAL r2 = dx*dx + dy*dy ;
REAL r = sqrt(r2) ;
REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// probability of detection
feature_pd = 0 ;
if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
feature_pd = dev_config.pd ;
// write non-detection term
copy_gaussians(feature_predict,feature_nondetect) ;
feature_nondetect.weight = feature_predict.weight*(1-feature_pd) ;
// measurement jacobian wrt feature
REAL J[4] ;
J[0] = dx/r ;
J[2] = dy/r ;
J[1] = -dy/r2 ;
J[3] = dx/r2 ;
REAL* P = feature_predict.cov ;
// BEGIN Maple-Generated expressions
// innovation covariance
REAL sigma[4] ;
REAL var_range = pow(dev_config.stdRange,2) ;
REAL var_bearing = pow(dev_config.stdBearing,2) ;
sigma[0] = J[0] * (P[0] * J[0] + P[4] * J[2]) + J[2] * (P[1] * J[0] + P[5] * J[2]) + var_range;
sigma[1] = J[1] * (P[0] * J[0] + P[4] * J[2]) + J[3] * (P[1] * J[0] + P[5] * J[2]);
sigma[2] = J[0] * (P[0] * J[1] + P[4] * J[3]) + J[2] * (P[1] * J[1] + P[5] * J[3]);
sigma[3] = J[1] * (P[0] * J[1] + P[4] * J[3]) + J[3] * (P[1] * J[1] + P[5] * J[3]) + var_bearing;
// enforce symmetry
sigma[1] = (sigma[1]+sigma[2])/2 ;
sigma[2] = sigma[1] ;
// makePositiveDefinite(sigma) ;
REAL det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
REAL S[4] ;
S[0] = sigma[3]/(det_sigma) ;
S[1] = -sigma[1]/(det_sigma) ;
S[2] = -sigma[2]/(det_sigma) ;
S[3] = sigma[0]/(det_sigma) ;
// Kalman gain
REAL K[8] ;
K[0] = P[0] * (J[0] * S[0] + J[1] * S[1])
+ P[4] * (J[2] * S[0] + J[3] * S[1]);
K[1] = P[1] * (J[0] * S[0] + J[1] * S[1])
+ P[5] * (J[2] * S[0] + J[3] * S[1]);
K[2] = P[2] * (J[0] * S[0] + J[1] * S[1])
+ P[6] * (J[2] * S[0] + J[3] * S[1]);
K[3] = P[3] * (J[0] * S[0] + J[1] * S[1])
+ P[7] * (J[2] * S[0] + J[3] * S[1]);
K[4] = P[0] * (J[0] * S[2] + J[1] * S[3])
+ P[4] * (J[2] * S[2] + J[3] * S[3]);
K[5] = P[1] * (J[0] * S[2] + J[1] * S[3])
+ P[5] * (J[2] * S[2] + J[3] * S[3]);
K[6] = P[2] * (J[0] * S[2] + J[1] * S[3])
+ P[6] * (J[2] * S[2] + J[3] * S[3]);
K[7] = P[3] * (J[0] * S[2] + J[1] * S[3])
+ P[7] * (J[2] * S[2] + J[3] * S[3]);
// Updated covariance (Joseph Form)
REAL cov_update[16] ;
cov_update[0] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + var_range * pow( K[0], 2) + var_bearing * pow( K[4], 2);
cov_update[1] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
cov_update[2] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[2] * (1 - K[0] * J[0] - K[4] * J[1]) + P[6] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
cov_update[3] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[3] * (1 - K[0] * J[0] - K[4] * J[1]) + P[7] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
cov_update[4] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
cov_update[5] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + var_range * pow( K[1], 2) + var_bearing * pow( K[5], 2);
cov_update[6] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[2] * (-K[1] * J[0] - K[5] * J[1]) + P[6] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
cov_update[7] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[3] * (-K[1] * J[0] - K[5] * J[1]) + P[7] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
cov_update[8] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
cov_update[9] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
cov_update[10] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[2] * (-K[2] * J[0] - K[6] * J[1]) + P[6] * (-K[2] * J[2] - K[6] * J[3]) + P[10] + var_range * pow( K[2], 2) + var_bearing * pow( K[6], 2);
cov_update[11] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[3] * (-K[2] * J[0] - K[6] * J[1]) + P[7] * (-K[2] * J[2] - K[6] * J[3]) + P[11] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
cov_update[12] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
cov_update[13] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
cov_update[14] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[2] * (-K[3] * J[0] - K[7] * J[1]) + P[6] * (-K[3] * J[2] - K[7] * J[3]) + P[14] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
cov_update[15] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[3] * (-K[3] * J[0] - K[7] * J[1]) + P[7] * (-K[3] * J[2] - K[7] * J[3]) + P[15] + var_range * pow( K[3], 2) + var_bearing * pow( K[7], 2);
REAL innov[2] ;
REAL dist = 0 ;
for ( int m = 0 ; m < n_measure ; m++ )
{
int idx = m*n_features ;
innov[0] = Z[m].range - r ;
innov[1] = wrapAngle(Z[m].bearing - bearing) ;
features_update[idx].mean[0] = feature_predict.mean[0] + K[0]*innov[0] + K[4]*innov[1] ;
features_update[idx].mean[1] = feature_predict.mean[1] + K[1]*innov[0] + K[5]*innov[1] ;
features_update[idx].mean[2] = feature_predict.mean[2] + K[2]*innov[0] + K[6]*innov[1] ;
features_update[idx].mean[3] = feature_predict.mean[3] + K[3]*innov[0] + K[7]*innov[1] ;
for ( int i = 0 ; i < 16 ; i++ )
features_update[idx].cov[i] = cov_update[i] ;
// compute single object likelihood
dist = innov[0]*innov[0]*S[0] +
innov[0]*innov[1]*(S[1] + S[2]) +
innov[1]*innov[1]*S[3] ;
if(Z[m].label==DYNAMIC_MEASUREMENT || !dev_config.labeledMeasurements)
{
// partially update weight (log-transformed)
features_update[idx].weight = safeLog(feature_pd)
+ safeLog(feature_predict.weight)
- 0.5*dist
- safeLog(2*M_PI)
- 0.5*safeLog(det_sigma) ;
}
else
{
features_update[idx].weight = safeLog(0) ;
}
}
}
///// computes various components for the Kalman update of a particular feature
///*!
// * Given a vehicle pose and feature Gaussian, the function computes the Kalman
// * gain, updated covariance, innovation covariance, determinant of the
// * innovation covariance, probability of detection, and predicted measurement.
// * The computed values are stored at the addresses referenced by the passed
// * pointers.
// *
// * This code is specific to XY-heading vehicle state with range-bearing
// * measurements to XY point features.
// \param pose vehicle pose
// \param feature feature gaussian
// \param K pointer to store Kalman gain matrix
// \param cov_update pointer to store updated covariance matrix
// \param det_sigma pointer to store determinant of innov. covariance
// \param S pointer to store innov. covariance matrix.
// \param feature_pd pointer to store feature probability of detect.
// \param z_predict pointer to store predicted measurement
// */
//__device__ void
//computePreUpdateComponents( ConstantVelocityState pose,
// Gaussian2D feature, REAL* K,
// REAL* cov_update, REAL* det_sigma,
// REAL* S, REAL* feature_pd,
// RangeBearingMeasurement* z_predict )
//{
// // predicted measurement
// REAL dx = feature.mean[0] - pose.px ;
// REAL dy = feature.mean[1] - pose.py ;
// REAL r2 = dx*dx + dy*dy ;
// REAL r = sqrt(r2) ;
// REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// z_predict->range = r ;
// z_predict->bearing = bearing ;
// // probability of detection
// if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
// *feature_pd = dev_config.pd ;
// else
// *feature_pd = 0 ;
// // measurement jacobian wrt feature
// REAL J[4] ;
// J[0] = dx/r ;
// J[2] = dy/r ;
// J[1] = -dy/r2 ;
// J[3] = dx/r2 ;
// // predicted feature covariance
// REAL* P = feature.cov ;
// // BEGIN Maple-Generated expressions
// // innovation covariance
// REAL sigma[4] ;
// sigma[0] = (P[0] * J[0] + J[2] * P[1]) * J[0] + (J[0] * P[2] + P[3] * J[2]) * J[2] + pow(dev_config.stdRange,2) ;
// sigma[1] = (P[0] * J[1] + J[3] * P[1]) * J[0] + (J[1] * P[2] + P[3] * J[3]) * J[2];
// sigma[2] = (P[0] * J[0] + J[2] * P[1]) * J[1] + (J[0] * P[2] + P[3] * J[2]) * J[3];
// sigma[3] = (P[0] * J[1] + J[3] * P[1]) * J[1] + (J[1] * P[2] + P[3] * J[3]) * J[3] + pow(dev_config.stdBearing,2) ;
// // enforce symmetry
// sigma[1] = (sigma[1]+sigma[2])/2 ;
// sigma[2] = sigma[1] ;
//// makePositiveDefinite(sigma) ;
// *det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
// S[0] = sigma[3]/(*det_sigma) ;
// S[1] = -sigma[1]/(*det_sigma) ;
// S[2] = -sigma[2]/(*det_sigma) ;
// S[3] = sigma[0]/(*det_sigma) ;
// // Kalman gain
// K[0] = S[0]*(P[0]*J[0] + P[2]*J[2]) + S[1]*(P[0]*J[1] + P[2]*J[3]) ;
// K[1] = S[0]*(P[1]*J[0] + P[3]*J[2]) + S[1]*(P[1]*J[1] + P[3]*J[3]) ;
// K[2] = S[2]*(P[0]*J[0] + P[2]*J[2]) + S[3]*(P[0]*J[1] + P[2]*J[3]) ;
// K[3] = S[2]*(P[1]*J[0] + P[3]*J[2]) + S[3]*(P[1]*J[1] + P[3]*J[3]) ;
// // Updated covariance (Joseph Form)
// cov_update[0] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + pow(K[0], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[2], 2) * dev_config.stdBearing*dev_config.stdBearing;
// cov_update[2] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
// cov_update[1] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
// cov_update[3] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + pow(K[1], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[3], 2) * dev_config.stdBearing*dev_config.stdBearing;
//}
//__device__ void
//computePreUpdateComponentsDynamic( ConstantVelocityState pose,
// Gaussian4D feature, REAL* K,
// REAL* cov_update, REAL* det_sigma,
// REAL* S, REAL* feature_pd,
// RangeBearingMeasurement* z_predict )
//{
// // predicted measurement
// REAL dx = feature.mean[0] - pose.px ;
// REAL dy = feature.mean[1] - pose.py ;
// REAL r2 = dx*dx + dy*dy ;
// REAL r = sqrt(r2) ;
// REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// z_predict->range = r ;
// z_predict->bearing = bearing ;
// // probability of detection
// if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
// *feature_pd = dev_config.pd ;
// else
// *feature_pd = 0 ;
// // measurement jacobian wrt feature
// REAL J[4] ;
// J[0] = dx/r ;
// J[2] = dy/r ;
// J[1] = -dy/r2 ;
// J[3] = dx/r2 ;
// // predicted feature covariance
// REAL* P = feature.cov ;
// // BEGIN Maple-Generated expressions
// // innovation covariance
// REAL sigma[4] ;
// REAL var_range = pow(dev_config.stdRange,2) ;
// REAL var_bearing = pow(dev_config.stdBearing,2) ;
// sigma[0] = J[0] * (P[0] * J[0] + P[4] * J[2]) + J[2] * (P[1] * J[0] + P[5] * J[2]) + var_range;
// sigma[1] = J[1] * (P[0] * J[0] + P[4] * J[2]) + J[3] * (P[1] * J[0] + P[5] * J[2]);
// sigma[2] = J[0] * (P[0] * J[1] + P[4] * J[3]) + J[2] * (P[1] * J[1] + P[5] * J[3]);
// sigma[3] = J[1] * (P[0] * J[1] + P[4] * J[3]) + J[3] * (P[1] * J[1] + P[5] * J[3]) + var_bearing;
// // enforce symmetry
// sigma[1] = (sigma[1]+sigma[2])/2 ;
// sigma[2] = sigma[1] ;
//// makePositiveDefinite(sigma) ;
// *det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
// S[0] = sigma[3]/(*det_sigma) ;
// S[1] = -sigma[1]/(*det_sigma) ;
// S[2] = -sigma[2]/(*det_sigma) ;
// S[3] = sigma[0]/(*det_sigma) ;
// // Kalman gain
// K[0] = P[0] * (J[0] * S[0] + J[1] * S[1])
// + P[4] * (J[2] * S[0] + J[3] * S[1]);
// K[1] = P[1] * (J[0] * S[0] + J[1] * S[1])
// + P[5] * (J[2] * S[0] + J[3] * S[1]);
// K[2] = P[2] * (J[0] * S[0] + J[1] * S[1])
// + P[6] * (J[2] * S[0] + J[3] * S[1]);
// K[3] = P[3] * (J[0] * S[0] + J[1] * S[1])
// + P[7] * (J[2] * S[0] + J[3] * S[1]);
// K[4] = P[0] * (J[0] * S[2] + J[1] * S[3])
// + P[4] * (J[2] * S[2] + J[3] * S[3]);
// K[5] = P[1] * (J[0] * S[2] + J[1] * S[3])
// + P[5] * (J[2] * S[2] + J[3] * S[3]);
// K[6] = P[2] * (J[0] * S[2] + J[1] * S[3])
// + P[6] * (J[2] * S[2] + J[3] * S[3]);
// K[7] = P[3] * (J[0] * S[2] + J[1] * S[3])
// + P[7] * (J[2] * S[2] + J[3] * S[3]);
// // Updated covariance (Joseph Form)
// cov_update[0] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + var_range * pow( K[0], 2) + var_bearing * pow( K[4], 2);
// cov_update[1] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
// cov_update[2] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[2] * (1 - K[0] * J[0] - K[4] * J[1]) + P[6] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
// cov_update[3] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[3] * (1 - K[0] * J[0] - K[4] * J[1]) + P[7] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
// cov_update[4] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
// cov_update[5] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + var_range * pow( K[1], 2) + var_bearing * pow( K[5], 2);
// cov_update[6] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[2] * (-K[1] * J[0] - K[5] * J[1]) + P[6] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
// cov_update[7] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[3] * (-K[1] * J[0] - K[5] * J[1]) + P[7] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
// cov_update[8] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
// cov_update[9] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
// cov_update[10] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[2] * (-K[2] * J[0] - K[6] * J[1]) + P[6] * (-K[2] * J[2] - K[6] * J[3]) + P[10] + var_range * pow( K[2], 2) + var_bearing * pow( K[6], 2);
// cov_update[11] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[3] * (-K[2] * J[0] - K[6] * J[1]) + P[7] * (-K[2] * J[2] - K[6] * J[3]) + P[11] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
// cov_update[12] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
// cov_update[13] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
// cov_update[14] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[2] * (-K[3] * J[0] - K[7] * J[1]) + P[6] * (-K[3] * J[2] - K[7] * J[3]) + P[14] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
// cov_update[15] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[3] * (-K[3] * J[0] - K[7] * J[1]) + P[7] * (-K[3] * J[2] - K[7] * J[3]) + P[15] + var_range * pow( K[3], 2) + var_bearing * pow( K[7], 2);
//}
///// kernel for computing various constants used in the CPHD filter
//__global__ void
//cphdConstantsKernel( REAL* dev_factorial, REAL* dev_C, REAL* dev_cn_clutter )
//{
// int n = threadIdx.x ;
// int k = blockIdx.x ;
// REAL* factorial = (REAL*)shmem ;
// factorial[n] = dev_factorial[n] ;
// __syncthreads() ;
// // compute the log binomial coefficients (nchoosek)
// int stride = dev_config.maxCardinality + 1 ;
// int idx = k*stride + n ;
// REAL log_nchoosek = 0 ;
// if ( k == 0 )
// {
// log_nchoosek = 0 ;
// }
// else if ( n == 0 || k > n )
// {
// log_nchoosek = LOG0 ;
// }
// else
// {
// log_nchoosek = factorial[n] - factorial[k]
// - factorial[n-k] ;
// }
// dev_C[idx] = log_nchoosek ;
// // thread block 0 computes the clutter cardinality
// if ( k == 0 )
// {
// dev_cn_clutter[n] = n*safeLog(dev_config.clutterRate)
// - dev_config.clutterRate
// - factorial[n] ;
// }
//// // for debugging: clutter cardinality with constant number of clutter
//// if ( k== 0 )
//// {
//// if ( n == dev_config.clutterRate)
//// dev_cn_clutter[n] = 0 ;
//// else
//// dev_cn_clutter[n] = LOG0 ;
//// }
//}
///// host-side helper function to call cphdConstantsKernel
//void
//initCphdConstants()
//{
// log_factorials = (REAL*)malloc( (config.maxCardinality+1)*sizeof(REAL) ) ;
// log_factorials[0] = 0 ;
// for ( int n = 1 ; n <= config.maxCardinality ; n++ )
// {
// log_factorials[n] = log_factorials[n-1] + safeLog((REAL)n) ;
// }
// CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_C,
// pow(config.maxCardinality+1,2)*sizeof(REAL) ) ) ;
// CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_factorial,
// (config.maxCardinality+1)*sizeof(REAL) ) ) ;
// CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_cn_clutter,
// (config.maxCardinality+1)*sizeof(REAL) ) ) ;
// CUDA_SAFE_CALL( cudaMemcpy( dev_factorial, &log_factorials[0],
// (config.maxCardinality+1)*sizeof(REAL),
// cudaMemcpyHostToDevice ) ) ;
// CUDA_SAFE_THREAD_SYNC() ;
//// CUDA_SAFE_CALL(
//// cudaMalloc( (void**)&dev_pspower,
//// (config.maxCardinality+1)*sizeof(REAL) ) ) ;
//// CUDA_SAFE_CALL(
//// cudaMalloc( (void**)&dev_qspower,
//// (config.maxCardinality+1)*sizeof(REAL) ) ) ;
// int n_blocks = config.maxCardinality+1 ;
// int n_threads = n_blocks ;
// cphdConstantsKernel<<<n_blocks, n_threads, n_threads*sizeof(REAL)>>>
// ( dev_factorial, dev_C, dev_cn_clutter ) ;
// CUDA_SAFE_THREAD_SYNC() ;
//}
/// kernel for particle prediction with an ackerman steering motion model
__global__ void
phdPredictKernelAckerman(ConstantVelocityState* particles_prior,
AckermanControl control,
AckermanNoise* noise,
ConstantVelocityState* particles_predict,
int n_predict)
{
const int tid = threadIdx.x ;
const int predict_idx = blockIdx.x*blockDim.x + tid ;
if (predict_idx < n_predict)
{
// get the prior state from which this prediction is generated
const int prior_idx = floor((float)predict_idx/dev_config.nPredictParticles) ;
ConstantVelocityState oldState = particles_prior[prior_idx] ;
// use the motion model to compute the prediction
ConstantVelocityState newState ;
REAL ve_noisy = control.v_encoder + noise[predict_idx].n_encoder ;
REAL alpha_noisy = control.alpha + noise[predict_idx].n_alpha ;
REAL vc = ve_noisy/(1-tan(alpha_noisy)*dev_config.h/dev_config.l) ;
REAL xc_dot = vc*cos(oldState.ptheta) ;
REAL yc_dot = vc*sin(oldState.ptheta) ;
REAL thetac_dot = vc*tan(alpha_noisy)/dev_config.l ;
REAL dt = dev_config.dt/dev_config.subdividePredict ;
newState.px = oldState.px +
dt*(xc_dot -
thetac_dot*( dev_config.a*sin(oldState.ptheta) + dev_config.b*cos(oldState.ptheta) )
) ;
newState.py = oldState.py +
dt*(yc_dot +
thetac_dot*( dev_config.a*cos(oldState.ptheta) - dev_config.b*sin(oldState.ptheta) )
) ;
newState.ptheta = wrapAngle(oldState.ptheta + dt*thetac_dot) ;
newState.vx = 0 ;
newState.vy = 0 ;
newState.vtheta = 0 ;
// save predicted state to memory
particles_predict[predict_idx] = newState ;
}
}
__global__ void
phdPredictKernel(ConstantVelocityState* particles_prior,
ConstantVelocityNoise* noise, ConstantVelocityState* particles_predict,
int n_predict)
{
const int tid = threadIdx.x ;
const int predict_idx = blockIdx.x*blockDim.x + tid ;
if (predict_idx < n_predict)
{
const int prior_idx = floor((float)predict_idx/dev_config.nPredictParticles) ;
ConstantVelocityState oldState = particles_prior[prior_idx] ;
ConstantVelocityState newState ;
REAL dt = dev_config.dt/dev_config.subdividePredict ;
// typename modelType::stateType newState = mm(particles[particleIdx],*control,noise[particleIdx]) ;
newState.px = oldState.px +
dt*(oldState.vx*cos(oldState.ptheta) -
oldState.vy*sin(oldState.ptheta))+
dt*dt*0.5*(noise[predict_idx].ax*cos(oldState.ptheta) -
noise[predict_idx].ay*sin(oldState.ptheta)) ;
newState.py = oldState.py +
dt*(oldState.vx*sin(oldState.ptheta) +
oldState.vy*cos(oldState.ptheta)) +
dt*dt*0.5*(noise[predict_idx].ax*sin(oldState.ptheta) +
noise[predict_idx].ay*cos(oldState.ptheta)) ;
newState.ptheta = wrapAngle(oldState.ptheta +
dt*oldState.vtheta +
0.5*dt*dt*noise[predict_idx].atheta) ;
newState.vx = oldState.vx + dt*noise[predict_idx].ax ;
newState.vy = oldState.vy + dt*noise[predict_idx].ay ;
newState.vtheta = oldState.vtheta + dt*noise[predict_idx].atheta ;
particles_predict[predict_idx] = newState ;
}
}
/// predict the cardinality distribution for the CPHD filter
/**
Each thread block processes the cardinality for a single particle. Each thread
inside the block computes the predicted cardinality for a particular value of
n.
*/
__global__ void
cardinalityPredictKernel( REAL* cn_prior, REAL* cn_births, REAL* dev_C,
REAL* cn_predict )
{
int n = threadIdx.x ;
int cn_offset = blockIdx.x * (dev_config.maxCardinality+1) ;
REAL* cn_prior_shared = (REAL*)shmem ;
// load the prior cardinality into shared mem
cn_prior_shared[n] = cn_prior[cn_offset+n] ;
__syncthreads() ;
REAL outersum = 0 ;
for ( int j = 0 ; j <= n ; j++ )
{
outersum += exp(cn_births[n-j]+cn_prior_shared[j]) ;
}
if ( outersum != 0)
cn_predict[cn_offset+n] = safeLog(outersum) ;
else
cn_predict[cn_offset+n] = LOG0 ;
}
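// The predicted cardinality is the discrete convolution of the prior and birth
// cardinality distributions, evaluated in the log domain:
// cn_predict[n] = log( sum_{j=0..n} exp(cn_births[n-j] + cn_prior[j]) ),
// with one thread block per particle and one thread per cardinality value n.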
/// compute the predicted states of every feature
template<class GaussianType, class MotionModelType>
__global__ void
predictMapKernel(GaussianType* features_prior, MotionModelType model,
int n_features, GaussianType* features_predict)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
// loop over all features
for (int j = 0 ; j < n_features ; j+=blockDim.x*gridDim.x)
{
int idx = j+tid ;
if ( idx < n_features )
{
features_predict[idx] = model.compute_prediction(features_prior[idx],
dev_config.dt) ;
}
}
}
__global__ void
predictMapKernelMixed(Gaussian4D* features_prior,
ConstantVelocityMotionModel model,
int n_features, Gaussian4D* features_predict,
Gaussian2D* features_jump)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
// loop over all features
for (int j = 0 ; j < n_features ; j+=blockDim.x*gridDim.x)
{
int idx = j+tid ;
if ( idx < n_features )
{
REAL vx = features_prior[idx].mean[2] ;
REAL vy = features_prior[idx].mean[3] ;
REAL v_mag = sqrt(vx*vx + vy*vy) ;
REAL sigmoid_v = 1/(1+exp(dev_config.beta*(dev_config.tau - v_mag))) ;
REAL p_jmm ;
REAL ps ;
REAL scale_x = 1 ;
REAL scale_y = 1 ;
if (dev_config.featureModel==DYNAMIC_MODEL)
{
p_jmm = 1 ;
ps = logistic_function(v_mag,0,1-dev_config.ps,dev_config.beta,
dev_config.tau) ;
ps = 1-ps ;
scale_x = logistic_function(vx,0,1,dev_config.beta,
dev_config.tau) ;
scale_y = logistic_function(vy,0,1,dev_config.beta,
dev_config.tau) ;
}
else if(dev_config.featureModel==MIXED_MODEL)
{
p_jmm = sigmoid_v ;
ps = dev_config.ps ;
// p_jmm = 1 ;
}
features_predict[idx] = model.compute_prediction(features_prior[idx],
dev_config.dt,
scale_x,scale_y) ;
features_predict[idx].weight = p_jmm*ps
*features_predict[idx].weight ;
features_jump[idx].weight = (1-p_jmm)*features_prior[idx].weight ;
features_jump[idx].mean[0] = features_prior[idx].mean[0] ;
features_jump[idx].mean[1] = features_prior[idx].mean[1] ;
features_jump[idx].cov[0] = features_prior[idx].cov[0] ;
features_jump[idx].cov[1] = features_prior[idx].cov[1] ;
features_jump[idx].cov[2] = features_prior[idx].cov[4] ;
features_jump[idx].cov[3] = features_prior[idx].cov[5] ;
}
}
}
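// In the mixed model, p_jmm (a sigmoid of the feature's speed) is the
// probability of staying in the constant-velocity model: the predicted dynamic
// feature keeps weight p_jmm*ps, while the "jump" copy receives (1-p_jmm)*w
// and keeps only the positional mean and the 2x2 position block of the 4x4
// covariance (flat indices 0, 1, 4 and 5 of Gaussian4D.cov).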
void
predictMapMixed(SynthSLAM& particles)
{
// combine all dynamic features into one vector
vector<Gaussian4D> all_features = combineFeatures(particles.maps_dynamic) ;
int n_features = all_features.size() ;
// allocate memory
Gaussian4D* dev_features_prior = NULL ;
Gaussian4D* dev_features_predict = NULL ;
Gaussian2D* dev_features_jump = NULL ;
CUDA_SAFE_CALL(cudaMalloc((void**)&dev_features_prior,
n_features*sizeof(Gaussian4D) ) ) ;
CUDA_SAFE_CALL(cudaMalloc((void**)&dev_features_predict,
n_features*sizeof(Gaussian4D) ) ) ;
CUDA_SAFE_CALL(cudaMalloc((void**)&dev_features_jump,
n_features*sizeof(Gaussian2D) ) ) ;
CUDA_SAFE_CALL(cudaMemcpy(dev_features_prior,&all_features[0],
n_features*sizeof(Gaussian4D),
cudaMemcpyHostToDevice) ) ;
int n_blocks = (n_features+255)/256 ;
// configure the feature motion model
ConstantVelocityMotionModel motion_model ;
motion_model.std_accx = config.stdAxMap ;
motion_model.std_accy = config.stdAyMap ;
// launch the kernel
predictMapKernelMixed<<<n_blocks,256>>>
(dev_features_prior,motion_model,n_features, dev_features_predict,
dev_features_jump ) ;
// copy results from device
vector<Gaussian2D> all_features_jump( all_features.size() ) ;
CUDA_SAFE_CALL(cudaMemcpy(&all_features[0],dev_features_predict,
n_features*sizeof(Gaussian4D),
cudaMemcpyDeviceToHost)) ;
CUDA_SAFE_CALL(cudaMemcpy(&all_features_jump[0],dev_features_jump,
n_features*sizeof(Gaussian2D),
cudaMemcpyDeviceToHost)) ;
// load predicted features back into particles
Gaussian4D* begin = &all_features[0] ;
Gaussian4D* end = begin
+ particles.maps_dynamic[0].size() ;
Gaussian2D* begin_jump = &all_features_jump[0] ;
Gaussian2D* end_jump = begin_jump
+ particles.maps_dynamic[0].size() ;
for ( int n = 0 ; n < particles.n_particles ; n++ )
{
particles.maps_dynamic[n].assign(begin,end) ;
// if(config.featureModel==MIXED_MODEL)
// {
// particles.maps_static[n].insert(particles.maps_static[n].end(),
// begin_jump,
// end_jump ) ;
// }
if ( n < particles.n_particles - 1)
{
begin = end ;
end += particles.maps_dynamic[n+1].size() ;
begin_jump = end_jump ;
end_jump += particles.maps_dynamic[n+1].size() ;
}
}
// free memory
CUDA_SAFE_CALL( cudaFree( dev_features_prior ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_features_predict ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_features_jump ) ) ;
}
//template <class GaussianType>
//void
//predictMap(SynthSLAM& particles)
//{
// // combine all dynamic features into one vector
// vector<Gaussian4D> all_features = combineFeatures(particles.maps_dynamic) ;
// int n_features = all_features.size() ;
// GaussianType* dev_features_prior = NULL ;
// GaussianType* dev_features_predict = NULL ;
// CUDA_SAFE_CALL(cudaMalloc((void**)&dev_features_prior,
// n_features*sizeof(Gaussian4D) ) ) ;
// CUDA_SAFE_CALL(cudaMalloc((void**)&dev_features_predict,
// n_features*sizeof(Gaussian4D) ) ) ;
// CUDA_SAFE_CALL(cudaMemcpy(dev_features_prior,&all_features[0],
// n_features*sizeof(Gaussian4D),
// cudaMemcpyHostToDevice) ) ;
// int n_blocks = (n_features+255)/256 ;
// ConstantVelocityMotionModel motion_model ;
// motion_model.std_accx = config.stdAxMap ;
// motion_model.std_accy = config.stdAyMap ;
// predictMapKernel<<<n_blocks,256>>>
// (dev_features_prior,motion_model,n_features, dev_features_predict ) ;
// CUDA_SAFE_CALL(cudaMemcpy(&all_features[0],dev_features_predict,
// n_features*sizeof(GaussianType),
// cudaMemcpyDeviceToHost)) ;
// // load predicted features back into particles
// GaussianType* begin = &all_features[0] ;
// GaussianType* end = begin
// + particles.maps[0].size() ;
// for ( int n = 0 ; n < particles.n_particles ; n++ )
// {
// particles.maps_dynamic[n].assign(begin,end) ;
// if ( n < particles.n_particles - 1)
// {
// begin = end ;
// end += particles.maps_dynamic[n+1].size() ;
// }
// }
// CUDA_SAFE_CALL( cudaFree( dev_features_prior ) ) ;
// CUDA_SAFE_CALL( cudaFree( dev_features_predict ) ) ;
//}
/// host-side helper function for PHD filter prediction
void
phdPredict(SynthSLAM& particles, ... )
{
// start timer
cudaEvent_t start, stop ;
cudaEventCreate( &start ) ;
cudaEventCreate( &stop ) ;
cudaEventRecord( start,0 ) ;
int n_particles = particles.n_particles ;
int nPredict = n_particles*config.nPredictParticles ;
// allocate device memory
ConstantVelocityState* dev_states_prior = NULL ;
ConstantVelocityState* dev_states_predict = NULL ;
CUDA_SAFE_CALL(
cudaMalloc((void**)&dev_states_prior,
n_particles*sizeof(ConstantVelocityState) ) ) ;
CUDA_SAFE_CALL(
cudaMalloc((void**)&dev_states_predict,
nPredict*sizeof(ConstantVelocityState) ) ) ;
// copy inputs
CUDA_SAFE_CALL(
cudaMemcpy(dev_states_prior, &particles.states[0],
n_particles*sizeof(ConstantVelocityState),
cudaMemcpyHostToDevice) ) ;
if ( config.motionType == CV_MOTION )
{
// generate random noise values
std::vector<ConstantVelocityNoise> noiseVector(nPredict) ;
for (unsigned int i = 0 ; i < nPredict ; i++ )
{
noiseVector[i].ax = 3*config.ax * randn() ;
noiseVector[i].ay = 3*config.ay * randn() ;
noiseVector[i].atheta = 3*config.ayaw * randn() ;
}
ConstantVelocityNoise* dev_noise = NULL ;
        CUDA_SAFE_CALL(
                    cudaMalloc((void**)&dev_noise,
                               nPredict*sizeof(ConstantVelocityNoise) ) ) ;
        CUDA_SAFE_CALL(
                    cudaMemcpy(dev_noise, &noiseVector[0],
                               nPredict*sizeof(ConstantVelocityNoise),
                               cudaMemcpyHostToDevice) ) ;
// launch the kernel
int nThreads = min(nPredict,256) ;
int nBlocks = (nPredict+255)/256 ;
phdPredictKernel
<<<nBlocks, nThreads>>>
( dev_states_prior,dev_noise,dev_states_predict,nPredict ) ;
cudaFree(dev_noise) ;
}
else if( config.motionType == ACKERMAN_MOTION )
{
// read in the control data structure from variable argument list
va_list argptr ;
va_start(argptr,particles) ;
        AckermanControl control = va_arg(argptr,AckermanControl) ;
va_end(argptr) ;
// generate random noise values
std::vector<AckermanNoise> noiseVector(nPredict) ;
for (unsigned int i = 0 ; i < nPredict ; i++ )
{
noiseVector[i].n_alpha = config.stdAlpha * randn() ;
noiseVector[i].n_encoder = config.stdEncoder * randn() ;
}
AckermanNoise* dev_noise = NULL ;
CUDA_SAFE_CALL(
cudaMalloc((void**)&dev_noise,
nPredict*sizeof(AckermanNoise) ) ) ;
CUDA_SAFE_CALL(
cudaMemcpy(dev_noise, &noiseVector[0],
nPredict*sizeof(AckermanNoise),
cudaMemcpyHostToDevice) ) ;
// launch the kernel
int nThreads = min(nPredict,256) ;
int nBlocks = (nPredict+255)/256 ;
phdPredictKernelAckerman
<<<nBlocks, nThreads>>>
(dev_states_prior,control,dev_noise,dev_states_predict,nPredict) ;
cudaFree(dev_noise) ;
}
// copy results from device
ConstantVelocityState* states_predict = (ConstantVelocityState*)
malloc(nPredict*sizeof(ConstantVelocityState)) ;
CUDA_SAFE_CALL(cudaMemcpy(states_predict, dev_states_predict,
nPredict*sizeof(ConstantVelocityState),
cudaMemcpyDeviceToHost) ) ;
particles.states.assign( states_predict, states_predict+nPredict ) ;
// duplicate the PHD filter maps and cardinalities for the newly spawned
// vehicle particles, and downscale particle weights
if ( config.nPredictParticles > 1 )
{
DEBUG_MSG("Duplicating maps") ;
vector<vector<Gaussian2D> > maps_predict_static ;
vector<vector<Gaussian4D> > maps_predict_dynamic ;
vector<REAL> weights_predict ;
vector< vector <REAL> > cardinalities_predict ;
vector<int> resample_idx_predict ;
// maps_predict_static.clear();
// maps_predict_static.reserve(nPredict);
// maps_predict_dynamic.clear();
// maps_predict_dynamic.reserve(nPredict);
// weights_predict.clear();
// weights_predict.reserve(nPredict);
// cardinalities_predict.clear();
// cardinalities_predict.reserve(nPredict);
// resample_idx_predict.reserve(nPredict);
for ( int i = 0 ; i < n_particles ; i++ )
{
maps_predict_static.insert( maps_predict_static.end(),
config.nPredictParticles,
particles.maps_static[i] ) ;
maps_predict_dynamic.insert( maps_predict_dynamic.end(),
config.nPredictParticles,
particles.maps_dynamic[i] ) ;
cardinalities_predict.insert( cardinalities_predict.end(),
config.nPredictParticles,
particles.cardinalities[i] ) ;
float new_weight = particles.weights[i] - safeLog(config.nPredictParticles) ;
// DEBUG_VAL(new_weight) ;
weights_predict.insert( weights_predict.end(), config.nPredictParticles,
new_weight ) ;
resample_idx_predict.insert(resample_idx_predict.end(),
config.nPredictParticles,
particles.resample_idx[i]) ;
}
// DEBUG_VAL(maps_predict.size()) ;
DEBUG_MSG("saving duplicated maps") ;
DEBUG_MSG("static") ;
particles.maps_static = maps_predict_static ;
DEBUG_MSG("dynamic") ;
particles.maps_dynamic = maps_predict_dynamic ;
DEBUG_MSG("weights") ;
particles.weights = weights_predict ;
DEBUG_MSG("cardinalities") ;
particles.cardinalities = cardinalities_predict ;
particles.resample_idx = resample_idx_predict ;
particles.n_particles = nPredict ;
}
// map prediction
if(config.featureModel==DYNAMIC_MODEL || config.featureModel==MIXED_MODEL)
predictMapMixed(particles) ;
// log time
cudaEventRecord( stop,0 ) ;
cudaEventSynchronize( stop ) ;
float elapsed ;
cudaEventElapsedTime( &elapsed, start, stop ) ;
fstream predictTimeFile( "predicttime.log", fstream::out|fstream::app ) ;
predictTimeFile << elapsed << endl ;
predictTimeFile.close() ;
// clean up
CUDA_SAFE_CALL( cudaFree( dev_states_prior ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_states_predict ) ) ;
free(states_predict) ;
}
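// Minimal host-side usage sketch (assumption: 'particles' and 'control' are
// initialized elsewhere; this call pattern is implied by the va_arg handling above
// and is not part of the original source):
//
//     if ( config.motionType == ACKERMAN_MOTION )
//         phdPredict( particles, control ) ;  // AckermanControl read via va_arg
//     else
//         phdPredict( particles ) ;           // CV model draws its own noise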
/// determine which features are in range
/*!
* Each thread block handles a single particle. The threads in the block
* evaluate the range and bearing [blockDim] features in parallel, looping
* through all of the particle's features.
\param predictedFeatures Features from all particles concatenated into a
single array
\param map_sizes_static Number of features in each particle, so that the function
knows where the boundaries are in predictedFeatures
\param n_particles Total number of particles
\param poses Array of particle poses
\param inRange Pointer to boolean array that is filled by the function.
For each feature in predictedFeatures that is in range of its
respective particle, the corresponding entry in this array is set to
true
\param nInRange Pointer to integer array that is filled by the function.
Should be allocated to have [n_particles] elements. Each entry
represents the number of in range features for each particle.
*/
template<class GaussianType>
__global__ void
computeInRangeKernel( GaussianType *predictedFeatures, int* map_sizes_static,
int n_particles, ConstantVelocityState* poses,
char* inRange, int* n_in_range, int* n_nearly_in_range )
{
int tid = threadIdx.x ;
// total number of predicted features per block
int n_featuresBlock ;
// number of inrange features in the particle
__shared__ int nInRangeBlock ;
__shared__ int n_nearly_in_range_block ;
// vehicle pose of the thread block
ConstantVelocityState blockPose ;
GaussianType feature ;
for ( int p = 0 ; p < n_particles ; p += gridDim.x )
{
if ( p + blockIdx.x < n_particles )
{
int predict_offset = 0 ;
// compute the indexing offset for this particle
int map_idx = p + blockIdx.x ;
for ( int i = 0 ; i < map_idx ; i++ )
predict_offset += map_sizes_static[i] ;
// particle-wide values
if ( tid == 0 )
{
nInRangeBlock = 0 ;
n_nearly_in_range_block = 0 ;
}
blockPose = poses[map_idx] ;
n_featuresBlock = map_sizes_static[map_idx] ;
__syncthreads() ;
// loop through features
for ( int i = 0 ; i < n_featuresBlock ; i += blockDim.x )
{
if ( tid+i < n_featuresBlock )
{
// index of thread feature
int featureIdx = predict_offset + tid + i ;
feature = predictedFeatures[featureIdx] ;
// default value
inRange[featureIdx] = 0 ;
// compute the predicted measurement
REAL dx = feature.mean[0] - blockPose.px ;
REAL dy = feature.mean[1] - blockPose.py ;
REAL r2 = dx*dx + dy*dy ;
REAL r = sqrt(r2) ;
REAL bearing = wrapAngle(atan2f(dy,dx) - blockPose.ptheta) ;
if ( r >= dev_config.minRange &&
r <= dev_config.maxRange &&
fabs(bearing) <= dev_config.maxBearing )
{
atomicAdd( &nInRangeBlock, 1 ) ;
inRange[featureIdx] = 1 ;
}
else if ( r >= 0.8*dev_config.minRange &&
r <= 1.2*dev_config.maxRange &&
fabs(bearing) <= 1.2*dev_config.maxBearing )
{
inRange[featureIdx] = 2 ;
atomicAdd( &n_nearly_in_range_block, 1 ) ;
}
}
}
            // store the per-particle in-range counts
__syncthreads() ;
if ( tid == 0 )
{
n_in_range[map_idx] = nInRangeBlock ;
n_nearly_in_range[map_idx] = n_nearly_in_range_block ;
}
}
}
}
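// Launch sketch: one block per particle, with up to 256 threads sweeping that
// particle's features (this mirrors the launch in prepareUpdateInputs below, where
// dev_n_out_range2 receives the nearly-in-range counts):
//
//     computeInRangeKernel<<<n_particles, nThreads>>>
//         ( dev_maps, dev_map_sizes, n_particles, dev_poses,
//           dev_in_range, dev_n_in_range, dev_n_out_range2 ) ;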
///// generates a binomial Poisson cardinality distribution for the in-range features.
//__global__ void
//separateCardinalityKernel( Gaussian2D *features, int* map_offsets,
// REAL* cn_inrange)
//{
// int n = threadIdx.x ;
// int map_idx = blockIdx.x ;
// int n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
// int feature_idx = map_offsets[map_idx] + n ;
// REAL* cn_shared = (REAL*)shmem ;
// REAL* weights = (REAL*)&cn_shared[dev_config.maxCardinality+1] ;
// // compute product of weights
// REAL val = 0 ;
// if ( n < n_features )
// {
// val = log(features[ feature_idx ].weight) ;
// }
// sumByReduction( weights, val, n ) ;
// REAL log_alpha = weights[0] ;
// __syncthreads() ;
// // load the polynomial roots into shared memory
// if ( n < n_features )
// {
// weights[n] = (1-features[feature_idx].weight)/features[feature_idx].weight ;
// }
// else
// {
// weights[n] = 0 ;
// }
// // compute full cn using recursive algorithm
// cn_shared[n+1] = 0 ;
// int cn_offset = map_idx*(dev_config.maxCardinality+1) ;
// if ( n == 0 )
// {
// cn_shared[0] = 1 ;
// }
// __syncthreads() ;
// for ( int m = 0 ; m < n_features ; m++ )
// {
// REAL tmp1 = cn_shared[n+1] ;
// REAL tmp2 = cn_shared[n] ;
// __syncthreads() ;
// if ( n < m+1 )
// cn_shared[n+1] = tmp1 - weights[m]*tmp2 ;
// __syncthreads() ;
// }
// if ( n <= n_features )
// {
// int idx = cn_offset + (n_features - n) ;
// cn_inrange[idx] = safeLog(fabs(cn_shared[n]))
// + log_alpha ;
// }
// else
// {
// cn_inrange[cn_offset+n] = LOG0 ;
// }
//}
///// compute partially updated weights and updated means & covariances
///**
// \param features Array of all Gaussians from all particles concatenated together
// \param map_sizes Integer array indicating the number of features per particle.
// \param n_particles Number of particles
// \param n_measurements Number of measurements
// \param poses Array of particle poses
// \param w_partial Array of partially updated weights computed by kernel
// */
//__global__ void
//cphdPreUpdateKernel(Gaussian2D *features, int* map_offsets,
// int n_particles, int n_measurements, ConstantVelocityState* poses,
// Gaussian2D* updated_features, REAL* w_partial, REAL* qdw )
//{
// int tid = threadIdx.x + blockIdx.x*blockDim.x ;
// int n_total = (n_measurements+1)*map_offsets[n_particles] ;
// if ( tid >= n_total)
// return ;
// int map_idx = 0 ;
// while ( map_offsets[map_idx]*(n_measurements+1) <= tid )
// {
// map_idx++ ;
// }
// map_idx-- ;
// int n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
// int offset = map_offsets[map_idx]*(n_measurements+1) ;
// int feature_idx = floor( (float)(tid-offset)/(n_measurements) ) ;
// if ( feature_idx >= n_features ) // non-detect thread
// {
// int predict_idx = tid - n_features*n_measurements - offset
// + map_offsets[map_idx] ;
// updated_features[tid] = features[predict_idx] ;
// }
// else if ( tid < n_total ) // update thread
// {
// int z_idx = tid - feature_idx*n_measurements - offset ;
// Gaussian2D feature = features[map_offsets[map_idx]+feature_idx] ;
// Gaussian2D updated_feature ;
// RangeBearingMeasurement z = Z[z_idx] ;
// RangeBearingMeasurement z_predict ;
// ConstantVelocityState pose = poses[map_idx] ;
// REAL K[4] = {0,0,0,0} ;
// REAL sigmaInv[4] = {0,0,0,0} ;
// REAL covUpdate[4] = {0,0,0,0} ;
// REAL featurePd = 0 ;
// REAL detSigma = 0 ;
// computePreUpdateComponents( pose, feature, K, covUpdate,
// &detSigma, sigmaInv, &featurePd,
// &z_predict ) ;
// // innovation
// REAL innov[2] = {0,0} ;
// innov[0] = z.range - z_predict.range ;
// innov[1] = wrapAngle(z.bearing - z_predict.bearing) ;
// // updated mean
// updated_feature.mean[0] = feature.mean[0] + K[0]*innov[0] + K[2]*innov[1] ;
// updated_feature.mean[1] = feature.mean[1] + K[1]*innov[0] + K[3]*innov[1] ;
// // updated covariances
// updated_feature.cov[0] = covUpdate[0] ;
// updated_feature.cov[1] = covUpdate[1] ;
// updated_feature.cov[2] = covUpdate[2] ;
// updated_feature.cov[3] = covUpdate[3] ;
// // single-object likelihood
// REAL dist = innov[0]*innov[0]*sigmaInv[0] +
// innov[0]*innov[1]*(sigmaInv[1] + sigmaInv[2]) +
// innov[1]*innov[1]*sigmaInv[3] ;
// // partially updated weight
// updated_feature.weight = safeLog(featurePd) + safeLog(feature.weight)
// - 0.5*dist- safeLog(2*M_PI) - 0.5*safeLog(detSigma) ;
// updated_features[tid] = updated_feature ;
// int w_idx = map_offsets[map_idx]*n_measurements ;
// w_idx += feature_idx*n_measurements + z_idx ;
// w_partial[w_idx] = updated_feature.weight ;
// if ( z_idx == 0 )
// {
// offset = map_offsets[map_idx] ;
// qdw[offset+feature_idx] = safeLog(1-featurePd) + safeLog(feature.weight) ;
// }
// }
//}
///// computes the elementary symmetric polynomial coefficients
///**
// This kernel produces the coefficients of the elementary symmetric function
// for the CPHD update
// \param w_partial Array of partially updated weights
// \param map_sizes Number of features per particle
// \param n_measurements Number of measurements
// \param esf Array of ESF coefficients computed by kernel
// \param esfd Array of ESF coefficients, with each measurement omitted
// */
//__global__ void
//computeEsfKernel( REAL* w_partial, int* map_offsets, int n_measurements,
// REAL* esf, REAL* esfd )
//{
// REAL* lambda = (REAL*)shmem ;
// REAL* esf_shared = (REAL*)&lambda[n_measurements] ;
// // determine indexing offsets
// int tid = threadIdx.x ;
// int map_idx = blockIdx.x ;
// int block_offset = n_measurements*map_offsets[map_idx] ;
// int n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
// // compute log lambda
// lambda[tid] = 0 ;
// int idx = block_offset + tid ;
// REAL max_val = -FLT_MAX ;
// for ( int j = 0 ; j < n_features ; j++)
// {
// REAL tmp = w_partial[idx] ;
// REAL tmp_max = fmax(tmp,max_val) ;
// lambda[tid] = exp( max_val - tmp_max )*lambda[tid]
// + exp( tmp - tmp_max ) ;
// max_val = tmp_max ;
// idx += n_measurements ;
// }
// lambda[tid] = safeLog(lambda[tid]) + max_val
// + safeLog(dev_config.clutterRate)
// - safeLog(dev_config.clutterDensity) ;
// __syncthreads() ;
// // compute full esf using recursive algorithm
// esf_shared[tid+1] = 0 ;
// int esf_offset = map_idx*(n_measurements+1) ;
// if ( tid == 0 )
// {
// esf_shared[0] = 1 ;
// esf[esf_offset] = 0 ;
// }
// __syncthreads() ;
// for ( int m = 0 ; m < n_measurements ; m++ )
// {
// REAL tmp1 = esf_shared[tid+1] ;
// REAL tmp2 = esf_shared[tid] ;
// __syncthreads() ;
// if ( tid < m+1 )
// {
//// REAL tmp_sum ;
//// max_val = fmax(tmp1, lambda[m]+tmp2) ;
//// tmp_sum = exp(tmp1-max_val) + exp(lambda[m]+tmp2-max_val) ;
//// esf_shared[tid+1] = safeLog( fabs(tmp_sum) ) + max_val ;
// esf_shared[tid+1] = tmp1 - exp(lambda[m])*tmp2 ;
// }
// __syncthreads() ;
// }
// esf[esf_offset+tid+1] = log(fabs(esf_shared[tid+1])) ;
// // compute esf's for detection terms
// for ( int m = 0 ; m < n_measurements ; m++ )
// {
// int esfd_offset = n_measurements*n_measurements*map_idx + m*n_measurements ;
//// esf_shared[tid+1] = LOG0 ;
// esf_shared[tid+1] = 0 ;
// if ( tid == 0 )
// {
//// esf_shared[0] = 0 ;
//// esfd[esfd_offset] = 0 ;
// esf_shared[0] = 1 ;
// esfd[esfd_offset] = 0 ;
// }
// __syncthreads() ;
// int k = 0 ;
// for ( int n = 0 ; n < n_measurements ; n++ )
// {
// REAL tmp1 = esf_shared[tid+1] ;
// REAL tmp2 = esf_shared[tid] ;
// __syncthreads() ;
// if ( n != m )
// {
// if ( tid < k+1 )
// {
//// REAL tmp_sum ;
//// max_val = fmax(tmp1,lambda[n]+tmp2) ;
//// tmp_sum = exp(tmp1-max_val) - exp(lambda[n]+tmp2-max_val) ;
//// esf_shared[tid+1] = safeLog( fabs(tmp_sum) ) + max_val ;
// esf_shared[tid+1] = tmp1 - exp(lambda[n])*tmp2 ;
// }
// k++ ;
// }
// __syncthreads() ;
// }
// if ( tid < (n_measurements-1) )
// esfd[esfd_offset+tid+1] = log(fabs(esf_shared[tid+1])) ;
// }
//}
///// compute the multi-object likelihoods for the CPHD update
///**
// This kernel computes the terms denoted as Psi in Vo's Analytic CPHD paper, and
// their inner products with the predicted cardinality distribution. It also
// produces the updated cardinality
// */
//__global__ void
//computePsiKernel( Gaussian2D* features_predict, REAL* cn_predict, REAL* esf,
// REAL* esfd, int* map_offsets,
// int n_measurements, REAL* qdw, REAL* dev_factorial,
// REAL* dev_C, REAL* dev_cn_clutter, REAL* cn_update,
// REAL* innerprod_psi0, REAL* innerprod_psi1,
// REAL* innerprod_psi1d )
//{
// int n = threadIdx.x ;
// REAL psi0 = 0 ;
// REAL psi1 = 0 ;
// int map_idx = blockIdx.x ;
// int cn_offset = (dev_config.maxCardinality+1)*map_idx ;
// int esf_offset = (n_measurements+1)*map_idx ;
// int stop_idx = 0 ;
// REAL max_val0 = 0 ;
// REAL max_val1 = 0 ;
// REAL* shdata = (REAL*)shmem ;
// // compute the (log) inner product < q_D, w >
// int map_offset = map_offsets[map_idx] ;
// int n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
// REAL innerprod_qdw = 0 ;
// max_val0 = qdw[map_offset] ;
// for ( int j = 0 ; j < n_features ; j+=blockDim.x )
// {
// REAL val = -FLT_MAX ;
// if ( j+n < n_features )
// val = qdw[map_offset+j+n] ;
// maxByReduction(shdata,val,n) ;
// max_val0 = fmax(max_val0,shdata[0]) ;
// __syncthreads() ;
// }
// for ( int j = 0 ; j < n_features ; j+= blockDim.x )
// {
// REAL val = 0 ;
// if ( (j+n) < n_features )
// val = exp(qdw[map_offset+j+n]-max_val0) ;
// sumByReduction( shdata, val, n ) ;
// innerprod_qdw += shdata[0] ;
// __syncthreads() ;
// }
// innerprod_qdw = safeLog(innerprod_qdw) + max_val0 ;
// // compute the (log) inner product < 1, w >
// REAL wsum = 0 ;
// for ( int j = 0 ; j < n_features ; j += blockDim.x )
// {
// REAL val = 0 ;
// if ( (j+n) < n_features )
// val = features_predict[map_offset+j+n].weight ;
// sumByReduction( shdata, val, n );
// wsum += shdata[0] ;
// __syncthreads() ;
// }
// wsum = safeLog(wsum) ;
// // compute (log) PSI0(n) and PSI1(n), using log-sum-exp
// max_val0 = -FLT_MAX ;
// max_val1 = -FLT_MAX ;
// stop_idx = min(n,n_measurements) ;
// for ( int j = 0 ; j <= stop_idx ; j++ )
// {
// // PSI0
// REAL p_coeff = dev_C[n+j*(dev_config.maxCardinality+1)]
// + dev_factorial[j] ;
// REAL aux = dev_factorial[n_measurements-j]
// + dev_cn_clutter[n_measurements-j] + esf[esf_offset+ j]
// - n*wsum ;
// REAL tmp = aux + p_coeff + (n-j)*innerprod_qdw ;
// psi0 = exp(max_val0-fmax(max_val0,tmp))*psi0
// + exp(tmp - fmax(max_val0,tmp) ) ;
// max_val0 = fmax(max_val0,tmp) ;
// // PSI1
// p_coeff = dev_C[n+(j+1)*(dev_config.maxCardinality+1)]
// + dev_factorial[j+1] ;
// tmp = aux + p_coeff + (n-(j+1))*innerprod_qdw ;
// psi1 = exp(max_val1-fmax(max_val1,tmp))*psi1
// + exp(tmp - fmax(max_val1,tmp) ) ;
// max_val1 = fmax(max_val1,tmp) ;
// }
// psi0 = safeLog(psi0) + max_val0 ;
// psi1 = safeLog(psi1) + max_val1 ;
// // (log) inner product of PSI0 and predicted cardinality distribution, using
// // log-sum-exp trick
// REAL val = psi0 + cn_predict[cn_offset+n] ;
// maxByReduction( shdata, val, n ) ;
// max_val0 = shdata[0] ;
// __syncthreads() ;
// sumByReduction( shdata, exp(val-max_val0), n ) ;
// if ( n==0 )
// innerprod_psi0[map_idx] = safeLog(shdata[0]) + max_val0 ;
// // (log) inner product of PSI1 and predicted cardinality distribution, using
// // log-sum-exp trick
// val = psi1 + cn_predict[cn_offset+n] ;
// maxByReduction( shdata, psi1+cn_predict[cn_offset+n], n ) ;
//// shdata[n] = psi1+cn_predict[cn_offset+n] ;
// max_val1 = shdata[0] ;
// __syncthreads() ;
// sumByReduction( shdata, exp( val - max_val1 ), n ) ;
// if ( n == 0 )
// innerprod_psi1[map_idx] = safeLog(shdata[0]) + max_val1 ;
//// __syncthreads() ;
// // PSI1 detection terms
// stop_idx = min(n_measurements - 1, n) ;
// for ( int m = 0 ; m < n_measurements ; m++ )
// {
// int esfd_offset = map_idx * n_measurements * n_measurements
// + m*n_measurements ;
// REAL psi1d = 0 ;
// max_val1 = -FLT_MAX ;
// for ( int j = 0 ; j <= stop_idx ; j++ )
// {
// REAL p_coeff = dev_C[n+(j+1)*(dev_config.maxCardinality+1)]
// + dev_factorial[j+1] ;
// REAL aux = dev_factorial[n_measurements-1-j]
// + dev_cn_clutter[n_measurements-1-j] + esfd[esfd_offset+ j]
// - n*wsum ;
// REAL tmp = aux + p_coeff + (n-(j+1))*innerprod_qdw ;
// psi1d = exp(max_val1-fmax(max_val1,tmp))*psi1d
// + exp(tmp - fmax(max_val1,tmp) ) ;
// max_val1 = fmax(max_val1,tmp) ;
// }
// psi1d = safeLog(psi1d) + max_val1 ;
// val = psi1d + cn_predict[cn_offset+n] ;
// maxByReduction( shdata, val, n ) ;
// max_val1 = shdata[0] ;
// __syncthreads() ;
// sumByReduction( shdata, exp(val-max_val1), n ) ;
// if ( n == 0 )
// innerprod_psi1d[map_idx*n_measurements+m] = safeLog(shdata[0]) + max_val1 ;
// __syncthreads() ;
// }
// // compute log updated cardinality
// cn_update[cn_offset+n] = cn_predict[cn_offset+n] + psi0
// - innerprod_psi0[map_idx] ;
//}
///// perform the gaussian mixture CPHD weight update
///**
// This kernel takes the results produced by the previous three kernels in the
// CPHD pipeline (PreUpdate, ComputeEsf, and ComputePsi) and applies them to
// update the weights of the Gaussian Mixture as in Vo's paper
// Kernel organization: One thread block per particle. Each thread updates all
// the features for one measurement.
// */
//__global__ void
//cphdUpdateKernel( int* map_offsets, int n_measurements,
// REAL* innerprod_psi0, REAL* innerprod_psi1,
// REAL* innerprod_psi1d, bool* merged_flags,
// Gaussian2D* updated_features )
//{
// int z_idx = threadIdx.x ;
// int map_idx = blockIdx.x ;
// int offset = (n_measurements+1)*map_offsets[map_idx] ;
// int n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
// // detection update
// REAL psi1d = innerprod_psi1d[n_measurements*map_idx+z_idx] ;
// for ( int j = 0 ; j < n_features ; j++ )
// {
// REAL tmp = updated_features[offset+z_idx].weight
// + psi1d - innerprod_psi0[map_idx] + safeLog(dev_config.clutterRate)
// - safeLog(dev_config.clutterDensity) ;
// updated_features[offset+z_idx].weight = exp(tmp) ;
// if ( exp(tmp) >= dev_config.minFeatureWeight )
// merged_flags[offset + z_idx] = false ;
// else
// merged_flags[offset + z_idx] = true ;
// offset += n_measurements ;
// }
// // non-detection updates
// for ( int j = 0 ; j < n_features ; j += blockDim.x )
// {
// if ( j+z_idx < n_features )
// {
// int nondetect_idx = offset + j + z_idx ;
// REAL tmp = safeLog(updated_features[nondetect_idx].weight)
// + innerprod_psi1[map_idx] - innerprod_psi0[map_idx]
// + safeLog(1-dev_config.pd) ;
// updated_features[nondetect_idx].weight = exp(tmp) ;
// if ( exp(tmp) >= dev_config.minFeatureWeight )
// merged_flags[nondetect_idx] = false ;
// else
// merged_flags[nondetect_idx] = true ;
// }
// }
//}
__global__ void
preUpdateSynthKernel(ConstantVelocityState* poses,
int* pose_indices,
Gaussian2D* features_predict,
REAL* features_pd,
int n_features, int n_measure,
Gaussian2D* features_preupdate){
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
for ( int i = tid ; i < n_features ; i+= gridDim.x*blockDim.x){
// get vehicle pose
ConstantVelocityState pose = poses[pose_indices[i]] ;
// get predicted feature
Gaussian2D feature_predict = features_predict[i] ;
// predicted measurement
REAL dx = feature_predict.mean[0] - pose.px ;
REAL dy = feature_predict.mean[1] - pose.py ;
REAL r2 = dx*dx + dy*dy ;
REAL r = sqrt(r2) ;
REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// probability of detection
REAL feature_pd = 0 ;
if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
feature_pd = dev_config.pd ;
features_pd[i] = feature_pd ;
// measurement jacobian wrt feature
REAL J[4] ;
J[0] = dx/r ;
J[2] = dy/r ;
J[1] = -dy/r2 ;
J[3] = dx/r2 ;
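        // J is the 2x2 measurement Jacobian stored column-major:
        //     [ dr/dx  dr/dy ]   [ J[0]  J[2] ]
        //     [ db/dx  db/dy ] = [ J[1]  J[3] ]
        // with r = sqrt(dx^2 + dy^2) and b = atan2(dy,dx) - ptheta.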
REAL* P = feature_predict.cov ;
// BEGIN Maple-Generated expressions
// innovation covariance
REAL sigma[4] ;
sigma[0] = (P[0] * J[0] + J[2] * P[1]) * J[0] + (J[0] * P[2] + P[3] * J[2]) * J[2] + pow(dev_config.stdRange,2) ;
sigma[1] = (P[0] * J[1] + J[3] * P[1]) * J[0] + (J[1] * P[2] + P[3] * J[3]) * J[2];
sigma[2] = (P[0] * J[0] + J[2] * P[1]) * J[1] + (J[0] * P[2] + P[3] * J[2]) * J[3];
sigma[3] = (P[0] * J[1] + J[3] * P[1]) * J[1] + (J[1] * P[2] + P[3] * J[3]) * J[3] + pow(dev_config.stdBearing,2) ;
// enforce symmetry
sigma[1] = (sigma[1]+sigma[2])/2 ;
sigma[2] = sigma[1] ;
REAL det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
// inverse of sigma
REAL S[4] ;
S[0] = sigma[3]/(det_sigma) ;
S[1] = -sigma[1]/(det_sigma) ;
S[2] = -sigma[2]/(det_sigma) ;
S[3] = sigma[0]/(det_sigma) ;
// Kalman gain
REAL K[4] ;
K[0] = S[0]*(P[0]*J[0] + P[2]*J[2]) + S[1]*(P[0]*J[1] + P[2]*J[3]) ;
K[1] = S[0]*(P[1]*J[0] + P[3]*J[2]) + S[1]*(P[1]*J[1] + P[3]*J[3]) ;
K[2] = S[2]*(P[0]*J[0] + P[2]*J[2]) + S[3]*(P[0]*J[1] + P[2]*J[3]) ;
K[3] = S[2]*(P[1]*J[0] + P[3]*J[2]) + S[3]*(P[1]*J[1] + P[3]*J[3]) ;
REAL cov_update[4] ;
cov_update[0] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + pow(K[0], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[2], 2) * dev_config.stdBearing*dev_config.stdBearing;
cov_update[2] = ((1 - K[0] * J[0] - K[2] * J[1]) * P[0] + (-K[0] * J[2] - K[2] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((1 - K[0] * J[0] - K[2] * J[1]) * P[2] + (-K[0] * J[2] - K[2] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
cov_update[1] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (1 - K[0] * J[0] - K[2] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (-K[0] * J[2] - K[2] * J[3]) + K[0] * dev_config.stdRange*dev_config.stdRange * K[1] + K[2] * dev_config.stdBearing*dev_config.stdBearing * K[3];
cov_update[3] = ((-K[1] * J[0] - K[3] * J[1]) * P[0] + (1 - K[1] * J[2] - K[3] * J[3]) * P[1]) * (-K[1] * J[0] - K[3] * J[1]) + ((-K[1] * J[0] - K[3] * J[1]) * P[2] + (1 - K[1] * J[2] - K[3] * J[3]) * P[3]) * (1 - K[1] * J[2] - K[3] * J[3]) + pow(K[1], 2) * dev_config.stdRange*dev_config.stdRange + pow(K[3], 2) * dev_config.stdBearing*dev_config.stdBearing;
REAL innov[2] ;
REAL dist = 0 ;
for ( int m = 0 ; m < n_measure ; m++ )
{
int idx = m*n_features + i ;
innov[0] = Z[m].range - r ;
innov[1] = wrapAngle(Z[m].bearing - bearing) ;
features_preupdate[idx].mean[0] = feature_predict.mean[0] + K[0]*innov[0] + K[2]*innov[1] ;
features_preupdate[idx].mean[1] = feature_predict.mean[1] + K[1]*innov[0] + K[3]*innov[1] ;
for ( int n = 0 ; n < 4 ; n++ )
features_preupdate[idx].cov[n] = cov_update[n] ;
// compute single object likelihood
dist = innov[0]*innov[0]*S[0] +
innov[0]*innov[1]*(S[1] + S[2]) +
innov[1]*innov[1]*S[3] ;
if(Z[m].label==STATIC_MEASUREMENT || !dev_config.labeledMeasurements)
{
// partially update weight (log-transformed)
features_preupdate[idx].weight = safeLog(feature_pd)
+ safeLog(feature_predict.weight)
- 0.5*dist
- safeLog(2*M_PI)
- 0.5*safeLog(det_sigma) ;
}
else
{
features_preupdate[idx].weight = safeLog(0) ;
}
}
}
}
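// The Maple-generated expressions above are the scalar expansion of the standard
// EKF pre-update, summarized here for reference:
//     S  = J P J^T + R,  R = diag(stdRange^2, stdBearing^2)   (innovation covariance)
//     K  = P J^T S^-1                                          (Kalman gain)
//     m' = m + K (z - z_predict)                               (per-measurement mean)
//     P' = (I - K J) P (I - K J)^T + K R K^T                   (Joseph-form covariance)
//     log w' = log pd + log w - 0.5*d^2 - log(2*pi) - 0.5*log|S|,  d^2 = innov^T S^-1 innov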
__global__ void
preUpdateSynthKernel(ConstantVelocityState* poses,
int* pose_indices,
Gaussian4D* features_predict,
REAL* features_pd,
int n_features, int n_measure,
Gaussian4D* features_preupdate){
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
for ( int i = tid ; i < n_features ; i+= gridDim.x*blockDim.x){
// get vehicle pose
ConstantVelocityState pose = poses[pose_indices[i]] ;
// get predicted feature
Gaussian4D feature_predict = features_predict[i] ;
// predicted measurement
REAL dx = feature_predict.mean[0] - pose.px ;
REAL dy = feature_predict.mean[1] - pose.py ;
REAL r2 = dx*dx + dy*dy ;
REAL r = sqrt(r2) ;
REAL bearing = wrapAngle(atan2f(dy,dx) - pose.ptheta) ;
// probability of detection
REAL feature_pd = 0 ;
if ( r <= dev_config.maxRange && fabsf(bearing) <= dev_config.maxBearing )
feature_pd = dev_config.pd ;
features_pd[i] = feature_pd ;
// measurement jacobian wrt feature
REAL J[4] ;
J[0] = dx/r ;
J[2] = dy/r ;
J[1] = -dy/r2 ;
J[3] = dx/r2 ;
REAL* P = feature_predict.cov ;
// BEGIN Maple-Generated expressions
// innovation covariance
REAL sigma[4] ;
REAL var_range = pow(dev_config.stdRange,2) ;
REAL var_bearing = pow(dev_config.stdBearing,2) ;
sigma[0] = J[0] * (P[0] * J[0] + P[4] * J[2]) + J[2] * (P[1] * J[0] + P[5] * J[2]) + var_range;
sigma[1] = J[1] * (P[0] * J[0] + P[4] * J[2]) + J[3] * (P[1] * J[0] + P[5] * J[2]);
sigma[2] = J[0] * (P[0] * J[1] + P[4] * J[3]) + J[2] * (P[1] * J[1] + P[5] * J[3]);
sigma[3] = J[1] * (P[0] * J[1] + P[4] * J[3]) + J[3] * (P[1] * J[1] + P[5] * J[3]) + var_bearing;
// enforce symmetry
sigma[1] = (sigma[1]+sigma[2])/2 ;
sigma[2] = sigma[1] ;
// makePositiveDefinite(sigma) ;
REAL det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
REAL S[4] ;
S[0] = sigma[3]/(det_sigma) ;
S[1] = -sigma[1]/(det_sigma) ;
S[2] = -sigma[2]/(det_sigma) ;
S[3] = sigma[0]/(det_sigma) ;
// Kalman gain
REAL K[8] ;
K[0] = P[0] * (J[0] * S[0] + J[1] * S[1])
+ P[4] * (J[2] * S[0] + J[3] * S[1]);
K[1] = P[1] * (J[0] * S[0] + J[1] * S[1])
+ P[5] * (J[2] * S[0] + J[3] * S[1]);
K[2] = P[2] * (J[0] * S[0] + J[1] * S[1])
+ P[6] * (J[2] * S[0] + J[3] * S[1]);
K[3] = P[3] * (J[0] * S[0] + J[1] * S[1])
+ P[7] * (J[2] * S[0] + J[3] * S[1]);
K[4] = P[0] * (J[0] * S[2] + J[1] * S[3])
+ P[4] * (J[2] * S[2] + J[3] * S[3]);
K[5] = P[1] * (J[0] * S[2] + J[1] * S[3])
+ P[5] * (J[2] * S[2] + J[3] * S[3]);
K[6] = P[2] * (J[0] * S[2] + J[1] * S[3])
+ P[6] * (J[2] * S[2] + J[3] * S[3]);
K[7] = P[3] * (J[0] * S[2] + J[1] * S[3])
+ P[7] * (J[2] * S[2] + J[3] * S[3]);
// Updated covariance (Joseph Form)
REAL cov_update[16] ;
cov_update[0] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + var_range * pow( K[0], 2) + var_bearing * pow( K[4], 2);
cov_update[1] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
cov_update[2] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[2] * (1 - K[0] * J[0] - K[4] * J[1]) + P[6] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
cov_update[3] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (1 - K[0] * J[0] - K[4] * J[1]) + P[4] * (-K[0] * J[2] - K[4] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (1 - K[0] * J[0] - K[4] * J[1]) + P[5] * (-K[0] * J[2] - K[4] * J[3])) + P[3] * (1 - K[0] * J[0] - K[4] * J[1]) + P[7] * (-K[0] * J[2] - K[4] * J[3]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
cov_update[4] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + K[0] * var_range * K[1] + K[4] * var_bearing * K[5];
cov_update[5] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + var_range * pow( K[1], 2) + var_bearing * pow( K[5], 2);
cov_update[6] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[2] * (-K[1] * J[0] - K[5] * J[1]) + P[6] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
cov_update[7] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[1] * J[0] - K[5] * J[1]) + P[4] * (1 - K[1] * J[2] - K[5] * J[3])) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[1] * J[0] - K[5] * J[1]) + P[5] * (1 - K[1] * J[2] - K[5] * J[3])) + P[3] * (-K[1] * J[0] - K[5] * J[1]) + P[7] * (1 - K[1] * J[2] - K[5] * J[3]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
cov_update[8] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[0] * var_range * K[2] + K[4] * var_bearing * K[6];
cov_update[9] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + K[1] * var_range * K[2] + K[5] * var_bearing * K[6];
cov_update[10] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[2] * (-K[2] * J[0] - K[6] * J[1]) + P[6] * (-K[2] * J[2] - K[6] * J[3]) + P[10] + var_range * pow( K[2], 2) + var_bearing * pow( K[6], 2);
cov_update[11] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[2] * J[0] - K[6] * J[1]) + P[4] * (-K[2] * J[2] - K[6] * J[3]) + P[8]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[2] * J[0] - K[6] * J[1]) + P[5] * (-K[2] * J[2] - K[6] * J[3]) + P[9]) + P[3] * (-K[2] * J[0] - K[6] * J[1]) + P[7] * (-K[2] * J[2] - K[6] * J[3]) + P[11] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
cov_update[12] = (1 - K[0] * J[0] - K[4] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[0] * J[2] - K[4] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[0] * var_range * K[3] + K[4] * var_bearing * K[7];
cov_update[13] = (-K[1] * J[0] - K[5] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (1 - K[1] * J[2] - K[5] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + K[1] * var_range * K[3] + K[5] * var_bearing * K[7];
cov_update[14] = (-K[2] * J[0] - K[6] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[2] * J[2] - K[6] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[2] * (-K[3] * J[0] - K[7] * J[1]) + P[6] * (-K[3] * J[2] - K[7] * J[3]) + P[14] + K[2] * var_range * K[3] + K[6] * var_bearing * K[7];
cov_update[15] = (-K[3] * J[0] - K[7] * J[1]) * (P[0] * (-K[3] * J[0] - K[7] * J[1]) + P[4] * (-K[3] * J[2] - K[7] * J[3]) + P[12]) + (-K[3] * J[2] - K[7] * J[3]) * (P[1] * (-K[3] * J[0] - K[7] * J[1]) + P[5] * (-K[3] * J[2] - K[7] * J[3]) + P[13]) + P[3] * (-K[3] * J[0] - K[7] * J[1]) + P[7] * (-K[3] * J[2] - K[7] * J[3]) + P[15] + var_range * pow( K[3], 2) + var_bearing * pow( K[7], 2);
REAL innov[2] ;
REAL dist = 0 ;
for ( int m = 0 ; m < n_measure ; m++ )
{
int idx = m*n_features+i ;
innov[0] = Z[m].range - r ;
innov[1] = wrapAngle(Z[m].bearing - bearing) ;
features_preupdate[idx].mean[0] = feature_predict.mean[0] + K[0]*innov[0] + K[4]*innov[1] ;
features_preupdate[idx].mean[1] = feature_predict.mean[1] + K[1]*innov[0] + K[5]*innov[1] ;
features_preupdate[idx].mean[2] = feature_predict.mean[2] + K[2]*innov[0] + K[6]*innov[1] ;
features_preupdate[idx].mean[3] = feature_predict.mean[3] + K[3]*innov[0] + K[7]*innov[1] ;
for ( int n = 0 ; n < 16 ; n++ )
features_preupdate[idx].cov[n] = cov_update[n] ;
// compute single object likelihood
dist = innov[0]*innov[0]*S[0] +
innov[0]*innov[1]*(S[1] + S[2]) +
innov[1]*innov[1]*S[3] ;
if(Z[m].label==DYNAMIC_MEASUREMENT || !dev_config.labeledMeasurements)
{
// partially update weight (log-transformed)
features_preupdate[idx].weight = safeLog(feature_pd)
+ safeLog(feature_predict.weight)
- 0.5*dist
- safeLog(2*M_PI)
- 0.5*safeLog(det_sigma) ;
}
else
{
features_preupdate[idx].weight = safeLog(0) ;
}
}
}
}
/// perform the gaussian mixture PHD update
/**
  PHD update algorithm as in Vo & Ma 2006.
    \param features_predict Predicted (persistent) Gaussians from all particles,
        concatenated into a single array
    \param featurePd Detection probability evaluated for each predicted feature
    \param features_preupdate Measurement-conditioned pre-update Gaussians,
        n_measure entries per predicted feature
    \param features_birth Birth Gaussians, n_measure entries per particle
    \param map_offsets Indexing offsets (n_particles+1 entries) giving the start
        of each particle's features in the concatenated arrays
    \param n_particles Number of particles
    \param n_measure Number of measurements
    \param features_update Stores the updated Gaussians computed by the kernel:
        for each particle, the non-detection terms, then the detection terms
        grouped per measurement, then the birth terms
    \param merge_flags Array of booleans used by the merging kernel to keep
        track of which features have already been merged; features whose updated
        weight falls below minFeatureWeight are pre-flagged here
    \param particle_weights New (log) particle weights after the PHD update
  */
template <class GaussianType>
__global__ void
phdUpdateKernel(GaussianType* features_predict,
REAL* featurePd,
GaussianType* features_preupdate,
GaussianType* features_birth,
int* map_offsets,
int n_particles, int n_measure,
GaussianType* features_update,
bool* merge_flags,
REAL* particle_weights)
{
// shared memory variables
__shared__ REAL sdata[256] ;
REAL particle_weight = 0 ;
REAL cardinality_predict = 0 ;
int update_offset = 0 ;
int n_features = 0 ;
int n_update = 0 ;
int predict_offset = 0 ;
int preupdate_offset = 0 ;
int birth_offset = 0 ;
// initialize variables
int tid = threadIdx.x ;
// pre-update variables
GaussianType feature ;
// update variables
int preupdate_stride = map_offsets[n_particles] ;
REAL w_partial = 0 ;
int updateIdx = 0 ;
// loop over particles
for ( int map_idx = blockIdx.x ; map_idx < n_particles ; map_idx += gridDim.x )
{
// initialize map-specific variables
predict_offset = map_offsets[map_idx] ;
update_offset = predict_offset*(n_measure+1) +
map_idx*n_measure ;
preupdate_offset = predict_offset ;
n_features = map_offsets[map_idx+1] - map_offsets[map_idx] ;
n_update = (n_features)*(n_measure+1) + n_measure ;
particle_weight = 0 ;
cardinality_predict = 0.0 ;
birth_offset = map_idx*n_measure ;
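        // Layout of this particle's slice of features_update (n_update entries
        // starting at update_offset):
        //     [ 0 .. n_features-1 ]                          non-detection terms
        //     [ n_features .. n_features*(n_measure+1)-1 ]   detection terms,
        //                                                    grouped per measurement
        //     [ final n_measure entries ]                    birth terms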
// loop over predicted features + newborn features
for ( int j = 0 ; j < (n_features+n_measure) ; j += blockDim.x )
{
int feature_idx = j + tid ;
w_partial = 0 ;
if ( feature_idx < n_features )
{
// persistent feature
feature = features_predict[predict_offset+feature_idx] ;
REAL pd = featurePd[predict_offset+feature_idx] ;
// save non-detection term
int idx_nondetect = update_offset
+ feature_idx ;
copy_gaussians(feature,features_update[idx_nondetect]) ;
features_update[idx_nondetect].weight *= (1-pd) ;
// save the detection terms
for (int m = 0 ; m < n_measure ; m++){
int preupdate_idx = m*preupdate_stride +
preupdate_offset + feature_idx ;
int update_idx = update_offset + n_features +
m*n_features + feature_idx ;
copy_gaussians(features_preupdate[preupdate_idx],
features_update[update_idx]) ;
}
w_partial = pd*feature.weight ;
}
else if (feature_idx < n_features+n_measure)
{
// newborn feature
// find measurement corresponding to current thread
int z_idx = feature_idx - n_features ;
int idx_birth = update_offset + n_features
+ n_measure*n_features + z_idx ;
copy_gaussians(features_birth[birth_offset+z_idx],
features_update[idx_birth]) ;
w_partial = dev_config.birthWeight ;
}
else
{
// thread does not correspond to a feature
w_partial = 0 ;
}
// compute predicted cardinality
sumByReduction(sdata, w_partial, tid) ;
cardinality_predict += sdata[0] ;
__syncthreads() ;
}
// cuPrintf("particle_weight=%f\n",particle_weight) ;
// compute the weight normalizers
for ( int i = 0 ; i < n_measure ; i++ )
{
REAL log_normalizer = 0 ;
REAL val = 0 ;
REAL sum = 0 ;
GaussianType* ptr_update = features_update
+ update_offset + n_features + i*n_features ;
// cuPrintf("%f\n",features_update[0].weight) ;
if (n_features > 0)
{
// find the maximum from all the log partial weights
for ( int j = 0 ; j < (n_features) ; j += blockDim.x )
{
int feature_idx = j + tid ;
if ( feature_idx < n_features )
val = exp(ptr_update[feature_idx].weight) ;
else
val = 0 ;
sumByReduction(sdata,val,tid);
sum += sdata[0] ;
}
// add clutter density and birth weight
sum += dev_config.clutterDensity ;
sum += dev_config.birthWeight ;
// put normalizer in log form
log_normalizer = safeLog(sum) ;
}
else
{
sum = dev_config.clutterDensity + dev_config.birthWeight ;
log_normalizer = safeLog(sum) ;
}
// compute final feature weights
for ( int j = 0 ; j < (n_features+1) ; j += blockDim.x )
{
int feature_idx = j + tid ;
if ( feature_idx <= n_features)
{
if ( feature_idx < n_features )
{
// update detection weight
updateIdx = feature_idx ;
}
else if ( feature_idx == n_features )
{
// update birth term weight
updateIdx = (n_measure-i)*n_features + i ;
// cuPrintf("%d\n",updateIdx) ;
}
ptr_update[updateIdx].weight =
exp(ptr_update[updateIdx].weight-log_normalizer) ;
}
}
// update the pose particle weights
if ( tid == 0 )
{
// cuPrintf("normalizer = %f\n",log_normalizer) ;
particle_weight += log_normalizer ;
}
}
// Particle weighting
__syncthreads() ;
if ( tid == 0 )
{
if (dev_config.particleWeighting==0){
particle_weight -= cardinality_predict ;
particle_weights[map_idx] = particle_weight ;
}
else if (dev_config.particleWeighting==1){
// compute predicted cardinality
float cn_predict = 0 ;
for ( int i = 0 ; i < n_features ; i++ ){
cn_predict += features_predict[predict_offset+i].weight ;
}
                // compute updated cardinality
float cn_update = 0 ;
for ( int i = 0 ; i < n_features*(n_measure+1) + n_measure ; i++){
cn_update += features_update[update_offset+i].weight ;
}
particle_weights[map_idx] = n_measure*dev_config.clutterDensity
+ cn_update - cn_predict
- dev_config.clutterRate ;
}
}
// set the merging flags
for ( int j = 0 ; j < n_update ; j+=blockDim.x)
{
int feature_idx = j+tid ;
if (feature_idx < n_update)
{
int idx = update_offset+feature_idx ;
if (features_update[idx].weight<dev_config.minFeatureWeight)
merge_flags[idx] = true ;
else
merge_flags[idx] = false;
}
}
}
}
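// Note on the two weighting modes above: particleWeighting==0 accumulates the log
// measurement normalizers and subtracts the predicted cardinality, which appears to
// correspond to the "empty-map" importance weight of RB-PHD-SLAM,
//     w = exp(-cardinality_predict) * prod_m normalizer_m ;
// particleWeighting==1 uses the cardinality-balance form
//     w = n_measure*clutterDensity + (cn_update - cn_predict) - clutterRate .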
__global__ void
phdUpdateKernelMixed(ConstantVelocityState* poses,
Gaussian2D* features_predict_static,
Gaussian4D* features_predict_dynamic,
int* map_offsets_static, int* map_offsets_dynamic,
int n_particles, int n_measure,
Gaussian2D* features_update_static,
Gaussian4D* features_update_dynamic,
bool* merge_flags_static, bool* merge_flags_dynamic,
REAL* particle_weights)
{
__shared__ REAL sdata[256] ;
int tid = threadIdx.x ;
int map_idx = 0 ;
int feature_idx = 0 ;
ConstantVelocityState pose ;
int n_features_static = 0 ;
int n_features_dynamic = 0 ;
int predict_offset_static = 0 ;
int predict_offset_dynamic = 0 ;
int update_offset_static = 0 ;
int update_offset_dynamic = 0 ;
int n_update_static = 0 ;
int n_update_dynamic = 0 ;
REAL cardinality_predict = 0 ;
REAL particle_weight = 0 ;
// loop over particles
for ( int p = 0 ; p < n_particles ; p += gridDim.x )
{
map_idx = p + blockIdx.x ;
if ( map_idx < n_particles )
{
// compute offsets for the current map
n_features_static = map_offsets_static[map_idx+1]
- map_offsets_static[map_idx] ;
n_features_dynamic = map_offsets_dynamic[map_idx+1]
- map_offsets_dynamic[map_idx] ;
predict_offset_static = map_offsets_static[map_idx] ;
predict_offset_dynamic = map_offsets_dynamic[map_idx] ;
update_offset_static = predict_offset_static
+ n_measure*predict_offset_static
+ map_idx*n_measure ;
update_offset_dynamic = predict_offset_dynamic
+ n_measure*predict_offset_dynamic
+ map_idx*n_measure ;
n_update_static = n_features_static
+ n_measure*n_features_static
+ n_measure ;
n_update_dynamic = n_features_dynamic
+ n_measure*n_features_dynamic
+ n_measure ;
// get the corresponding vehicle pose
pose = poses[map_idx] ;
__syncthreads() ;
// reinitialize predicted cardinality
cardinality_predict = 0 ;
// initialize log(particle_weight) update to 1
particle_weight = 0 ;
for ( int j = 0 ; j < (n_features_static+n_measure+n_features_dynamic+n_measure) ; j += blockDim.x )
{
feature_idx = j + tid ;
// Distribution of features to threads:
// [persistent_static | birth_static | persistent_dynamic | birth_dynamic ]
REAL feature_pd = 0 ;
REAL val = 0 ;
bool is_static = (feature_idx < n_features_static+n_measure) ;
bool is_dynamic = (feature_idx < n_features_static+n_measure+n_features_dynamic+n_measure)
&& !is_static ;
if ( is_static)
{
Gaussian2D* ptr_update = NULL ;
Gaussian2D* ptr_nondetect = NULL ;
if(feature_idx < n_features_static)
{
ptr_nondetect = features_update_static
+ update_offset_static + feature_idx ;
ptr_update = ptr_nondetect + n_features_static ;
computePreUpdate( pose, features_predict_static[predict_offset_static+feature_idx],
n_features_static, n_measure, feature_pd,
*ptr_nondetect, ptr_update ) ;
val = feature_pd
                                *features_predict_static[predict_offset_static+feature_idx].weight ;
}
else if (feature_idx < n_features_static+n_measure)
{
int z_idx = feature_idx - n_features_static ;
ptr_update = features_update_static + update_offset_static
+ n_features_static
+ n_measure*n_features_static + z_idx ;
computeBirth(pose, Z[z_idx],*ptr_update) ;
}
}
else if(is_dynamic)
{
int feature_idx_dynamic = feature_idx
- n_features_static - n_measure ;
Gaussian4D* ptr_update = NULL ;
Gaussian4D* ptr_nondetect = NULL ;
if(feature_idx_dynamic < n_features_dynamic)
{
ptr_nondetect = features_update_dynamic
+ update_offset_dynamic + feature_idx_dynamic ;
ptr_update = ptr_nondetect + n_features_dynamic ;
computePreUpdate( pose, features_predict_dynamic[predict_offset_dynamic+feature_idx_dynamic],
n_features_dynamic, n_measure, feature_pd,
*ptr_nondetect,ptr_update ) ;
val = feature_pd
                                *features_predict_dynamic[predict_offset_dynamic+feature_idx_dynamic].weight ;
}
else if(feature_idx_dynamic < n_features_dynamic+n_measure )
{
int z_idx = feature_idx_dynamic - n_features_dynamic ;
ptr_update = features_update_dynamic + update_offset_dynamic
+ n_features_dynamic
+ n_features_dynamic*n_measure + z_idx ;
computeBirth(pose, Z[z_idx],*ptr_update) ;
// cuPrintf("Dynamic birth weight: %f\n",ptr_update->weight) ;
val = 0 ;
}
}
else
{
// not a valid feature index
val = 0 ;
}
// compute predicted cardinality
sumByReduction( sdata, val, tid );
cardinality_predict += sdata[0] ;
__syncthreads() ;
}
// finish updating weights - loop over measurements
for ( int m = 0 ; m < n_measure ; m++ )
{
// pointers offset to updated features corresponding to current
// measurement
Gaussian2D* ptr_static = features_update_static
+ update_offset_static
+ n_features_static
+ m*(n_features_static) ;
Gaussian4D* ptr_dynamic = features_update_dynamic
+ update_offset_dynamic
+ n_features_dynamic
+ m*(n_features_dynamic) ;
REAL normalizer = 0 ;
// normalizer is the sum of partially updated weights
// corresponding to current measurement.
for ( int j = 0 ; j < n_features_static+n_features_dynamic ; j += blockDim.x )
{
feature_idx = j + tid ;
// REAL val = -FLT_MAX ;
REAL val = 0 ;
bool is_static = feature_idx < n_features_static ;
bool is_dynamic = (feature_idx < n_features_static+n_features_dynamic)
&& !is_static ;
if ( is_static )
val = exp(ptr_static[feature_idx].weight) ;
else if(is_dynamic)
val = exp(ptr_dynamic[feature_idx-n_features_static].weight) ;
sumByReduction(sdata,val,tid);
normalizer += sdata[0] ;
}
normalizer += dev_config.clutterDensity
+ dev_config.birthWeight ;
// we get 2 birth terms when measurements are unlabeled
if ( !dev_config.labeledMeasurements )
normalizer += dev_config.birthWeight ;
normalizer = safeLog(normalizer) ;
// loop through features corresponding to current measurement,
// and divide by normalizer.
for ( int j = 0 ; j < n_features_static+1+n_features_dynamic+1 ; j+=blockDim.x )
{
feature_idx = j+tid ;
int idx_update = - 1 ;
bool is_static = (feature_idx < n_features_static+1) ;
bool is_dynamic = (feature_idx<(n_features_static+1+n_features_dynamic+1))
                            && !is_static ;
if ( is_static)
{
if(feature_idx < n_features_static)
{
idx_update = feature_idx ;
}
else if (feature_idx == n_features_static)
{
idx_update = (n_measure-m)*n_features_static + m ;
}
ptr_static[idx_update].weight =
exp(ptr_static[idx_update].weight - normalizer) ;
}
else if(is_dynamic)
{
int feature_idx_dynamic = feature_idx
- n_features_static - 1 ;
if(feature_idx_dynamic < n_features_dynamic)
{
idx_update = feature_idx_dynamic ;
}
else if(feature_idx_dynamic==n_features_dynamic)
{
idx_update = (n_measure-m)*n_features_dynamic + m ;
}
ptr_dynamic[idx_update].weight =
exp(ptr_dynamic[idx_update].weight - normalizer) ;
}
}
// multiply particle weight update by normalizer
__syncthreads() ;
particle_weight += normalizer ;
}
// finish updating particle weight
particle_weight -= cardinality_predict ;
if ( tid == 0){
if (dev_config.particleWeighting==0){
particle_weights[map_idx] = particle_weight ;
}
else if (dev_config.particleWeighting == 1){
// compute predicted cardinality
float cn_predict = 0 ;
for ( int i = 0 ; i < n_features_static ; i++ ){
cn_predict +=
features_predict_static[predict_offset_static+i].weight ;
}
for ( int i = 0 ; i < n_features_dynamic ; i++ ){
cn_predict +=
features_predict_dynamic[predict_offset_dynamic+i].weight ;
}
cn_predict += n_measure*dev_config.birthWeight ;
                // compute updated cardinality
float cn_update = 0 ;
for ( int i = 0 ; i < n_features_static*(n_measure+1) + n_measure ; i++){
cn_update += features_update_static[update_offset_static+i].weight ;
}
for ( int i = 0 ; i < n_features_dynamic*(n_measure+1) + n_measure ; i++){
cn_update += features_update_dynamic[update_offset_dynamic+i].weight ;
}
particle_weights[map_idx] = n_measure*dev_config.clutterDensity
+ cn_update - cn_predict
- dev_config.clutterRate ;
}
}
}
// set the merging flags
for ( int j = 0 ; j < n_update_static+n_update_dynamic ; j+=blockDim.x)
{
int feature_idx = j+tid ;
bool is_static = (feature_idx < n_update_static) ;
bool is_dynamic = (feature_idx < n_update_static+n_update_dynamic)
&& !is_static ;
if (is_static)
{
if (features_update_static[update_offset_static+feature_idx].weight<dev_config.minFeatureWeight)
merge_flags_static[update_offset_static+feature_idx] = true ;
else
merge_flags_static[update_offset_static+feature_idx] = false;
}
else if(is_dynamic)
{
feature_idx = feature_idx-n_update_static ;
if (features_update_dynamic[update_offset_dynamic+feature_idx].weight<dev_config.minFeatureWeight)
merge_flags_dynamic[update_offset_dynamic+feature_idx] = true;
else
merge_flags_dynamic[update_offset_dynamic+feature_idx] = false;
}
}
}
}
template <class GaussianType>
__global__ void
phdUpdateMergeKernel(GaussianType* updated_features,
GaussianType* mergedFeatures, int *mergedSizes,
bool *mergedFlags, int* map_offsets, int n_particles )
{
__shared__ GaussianType maxFeature ;
__shared__ GaussianType mergedFeature ;
__shared__ REAL sdata[256] ;
__shared__ int mergedSize ;
__shared__ int update_offset ;
__shared__ int n_update ;
int tid = threadIdx.x ;
REAL dist ;
GaussianType feature ;
clearGaussian(feature) ;
int dims = getGaussianDim(feature) ;
// loop over particles
for ( int p = 0 ; p < n_particles ; p += gridDim.x )
{
int map_idx = p + blockIdx.x ;
if ( map_idx < n_particles )
{
// initialize shared vars
if ( tid == 0)
{
update_offset = map_offsets[map_idx] ;
n_update = map_offsets[map_idx+1] - map_offsets[map_idx] ;
mergedSize = 0 ;
}
__syncthreads() ;
while(true)
{
// initialize the output values to defaults
if ( tid == 0 )
{
maxFeature.weight = -1 ;
clearGaussian(mergedFeature) ;
}
sdata[tid] = -1 ;
__syncthreads() ;
// find the maximum feature with parallel reduction
for ( int i = update_offset ; i < update_offset + n_update ; i += blockDim.x)
{
int idx = i + tid ;
if ( idx < (update_offset + n_update) )
{
if( !mergedFlags[idx] )
{
if (sdata[tid] == -1 ||
updated_features[(unsigned int)sdata[tid]].weight < updated_features[idx].weight )
{
sdata[tid] = idx ;
}
}
}
}
__syncthreads() ;
for ( int s = blockDim.x/2 ; s > 0 ; s >>= 1 )
{
if ( tid < s )
{
if ( sdata[tid] == -1 )
sdata[tid] = sdata[tid+s] ;
else if ( sdata[tid+s] >= 0 )
{
if(updated_features[(unsigned int)sdata[tid]].weight <
updated_features[(unsigned int)sdata[tid+s]].weight )
{
sdata[tid] = sdata[tid+s] ;
}
}
}
__syncthreads() ;
}
if ( sdata[0] == -1 || maxFeature.weight == 0 )
break ;
else if(tid == 0)
maxFeature = updated_features[ (unsigned int)sdata[0] ] ;
__syncthreads() ;
// find features to merge with max feature
REAL sval0 = 0 ;
// REAL sval1 = 0 ;
// REAL sval2 = 0 ;
clearGaussian(feature) ;
for ( int i = update_offset ; i < update_offset + n_update ; i += blockDim.x )
{
int idx = tid + i ;
if ( idx < update_offset+n_update )
{
if ( !mergedFlags[idx] )
{
if ( dev_config.distanceMetric == 0 )
dist = computeMahalDist(maxFeature, updated_features[idx]) ;
else if ( dev_config.distanceMetric == 1)
dist = computeHellingerDist(maxFeature, updated_features[idx]) ;
if ( dist < dev_config.minSeparation )
{
feature.weight += updated_features[idx].weight ;
for ( int j = 0 ; j < dims ; j++ )
feature.mean[j] += updated_features[idx].weight*updated_features[idx].mean[j] ;
}
}
}
}
// merge means and weights
sval0 = feature.weight ;
sumByReduction(sdata, sval0, tid) ;
if ( tid == 0 )
mergedFeature.weight = sdata[0] ;
__syncthreads() ;
if ( mergedFeature.weight == 0 )
break ;
for ( int j = 0 ; j < dims ; j++ )
{
sval0 = feature.mean[j] ;
sumByReduction(sdata,sval0,tid);
if( tid == 0 )
mergedFeature.mean[j] = sdata[0]/mergedFeature.weight ;
__syncthreads() ;
}
// merge the covariances
sval0 = 0 ;
// sval1 = 0 ;
// sval2 = 0 ;
clearGaussian(feature) ;
for ( int i = update_offset ; i < update_offset+n_update ; i += blockDim.x )
{
int idx = tid + i ;
if ( idx < update_offset+n_update )
{
if (!mergedFlags[idx])
{
if ( dev_config.distanceMetric == 0 )
dist = computeMahalDist(maxFeature, updated_features[idx]) ;
else if ( dev_config.distanceMetric == 1)
dist = computeHellingerDist(maxFeature, updated_features[idx]) ;
if ( dist < dev_config.minSeparation )
{
// use the mean of the local gaussian variable
// to store the innovation vector
for (int j = 0 ; j < dims ; j++)
{
feature.mean[j] = mergedFeature.mean[j]
- updated_features[idx].mean[j] ;
}
for (int j = 0 ; j < dims ; j++ )
{
REAL outer = feature.mean[j] ;
for ( int k = 0 ; k < dims ; k++)
{
REAL inner = feature.mean[k] ;
feature.cov[j*dims+k] +=
updated_features[idx].weight*
(updated_features[idx].cov[j*dims+k]
+ outer*inner) ;
}
}
mergedFlags[idx] = true ;
}
}
}
}
for ( int j = 0 ; j < dims*dims ; j++)
{
sval0 = feature.cov[j] ;
sumByReduction(sdata,sval0,tid);
if ( tid == 0 )
mergedFeature.cov[j] = sdata[0]/mergedFeature.weight ;
__syncthreads() ;
}
if ( tid == 0 )
{
force_symmetric_covariance(mergedFeature) ;
int mergeIdx = update_offset + mergedSize ;
copy_gaussians(mergedFeature,mergedFeatures[mergeIdx]) ;
mergedSize++ ;
}
__syncthreads() ;
}
__syncthreads() ;
// save the merged map size
if ( tid == 0 )
mergedSizes[map_idx] = mergedSize ;
}
} // end loop over particles
return ;
}
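/**
 * prepareUpdateInputs: host-side staging for the PHD update.
 *
 * Concatenates the per-particle maps, runs computeInRangeKernel to label each
 * feature (1 = in range, 2 = nearly in range, otherwise out of range), splits
 * the features into features_in / features_out1 / features_out2 accordingly,
 * and allocates and fills the device buffers used by the update kernels
 * (dev_maps_inrange, dev_map_offsets, dev_maps_updated, dev_merged_flags).
 */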
template <class GaussianType>
void
prepareUpdateInputs(vector<vector<GaussianType> > maps,
ConstantVelocityState* dev_poses,
int n_particles, int n_measure,
GaussianType*& dev_maps_inrange,
int*& dev_map_offsets, GaussianType*& dev_maps_updated,
bool*& dev_merged_flags,
vector<GaussianType>& features_in,
vector<GaussianType>& features_out1 ,
vector<GaussianType>& features_out2,
vector<int>& n_in_range_vec,
vector<int>& n_out_range1_vec,
vector<int>& n_out_range2_vec )
{
//------- Variable Declarations ---------//
vector<GaussianType> concat ;
vector<int> map_sizes(n_particles) ;
int nThreads = 0 ;
// map offsets
vector<int> map_offsets_in(n_particles+1,0) ;
vector<int> map_offsets_out(n_particles+1,0) ;
// device variables
GaussianType* dev_maps = NULL ;
int* dev_map_sizes = NULL ;
int* dev_n_in_range = NULL ;
int* dev_n_out_range2 = NULL ;
char* dev_in_range = NULL ;
int total_features = 0 ;
// in/out range book-keeping variables
int n_in_range = 0 ;
int n_out_range = 0 ;
int idx_in = 0 ;
int idx_out = 0 ;
int idx_out2 = 0 ;
int n_out_range1 = 0 ;
int n_out_range2 = 0 ;
vector<char> in_range ;
//------- End Variable Declarations -----//
///////////////////////////////////////////////////////////////////////////
//
// concatenate all the maps together for parallel processing
//
///////////////////////////////////////////////////////////////////////////
for ( unsigned int n = 0 ; n < n_particles ; n++ )
{
concat.insert( concat.end(),
maps[n].begin(),
maps[n].end() ) ;
map_sizes[n] = maps[n].size() ;
// keep track of largest map feature count
if ( map_sizes[n] > nThreads )
nThreads = map_sizes[n] ;
nThreads = min(nThreads,256) ;
total_features += map_sizes[n] ;
}
// allocate device space for map sizes
CUDA_SAFE_CALL(
cudaMalloc( (void**)&dev_map_sizes,
n_particles*sizeof(int) ) ) ;
if ( total_features > 0)
{
///////////////////////////////////////////////////////////////////////
//
// split features into in/out range parts
//
///////////////////////////////////////////////////////////////////////
// allocate device memory
CUDA_SAFE_CALL(
cudaMalloc( (void**)&dev_maps,
                    total_features*sizeof(GaussianType) ) ) ;
CUDA_SAFE_CALL(
cudaMalloc( (void**)&dev_n_in_range,
n_particles*sizeof(int) ) ) ;
CUDA_SAFE_CALL(
cudaMalloc( (void**)&dev_n_out_range2,
n_particles*sizeof(int) ) ) ;
CUDA_SAFE_CALL(
cudaMalloc( (void**)&dev_in_range,
total_features*sizeof(char) ) ) ;
// copy inputs
CUDA_SAFE_CALL(
cudaMemcpy( dev_maps, &concat[0], total_features*sizeof(GaussianType),
cudaMemcpyHostToDevice )
) ;
CUDA_SAFE_CALL(
cudaMemcpy( dev_map_sizes, &map_sizes[0], n_particles*sizeof(int),
cudaMemcpyHostToDevice )
) ;
// kernel launch
DEBUG_MSG("launching computeInRangeKernel") ;
DEBUG_VAL(nThreads) ;
computeInRangeKernel<<<n_particles,nThreads>>>
( dev_maps, dev_map_sizes, n_particles, dev_poses, dev_in_range,
dev_n_in_range, dev_n_out_range2 ) ;
CUDA_SAFE_THREAD_SYNC();
// allocate outputs
in_range.resize(total_features);
// copy outputs
CUDA_SAFE_CALL(
cudaMemcpy( &in_range[0],dev_in_range,
total_features*sizeof(char),
cudaMemcpyDeviceToHost )
) ;
CUDA_SAFE_CALL(
cudaMemcpy( &n_in_range_vec[0],dev_n_in_range,n_particles*sizeof(int),
cudaMemcpyDeviceToHost )
) ;
CUDA_SAFE_CALL(
cudaMemcpy( &n_out_range2_vec[0],dev_n_out_range2,n_particles*sizeof(int),
cudaMemcpyDeviceToHost )
) ;
// get total number of in-range features
for ( int i = 0 ; i < n_particles ; i++ )
{
n_in_range += n_in_range_vec[i] ;
n_out_range1_vec[i] = maps[i].size() - n_in_range_vec[i]
- n_out_range2_vec[i] ;
n_out_range2 += n_out_range2_vec[i] ;
}
// divide features into in-range/out-of-range parts
n_out_range = total_features - n_in_range ;
n_out_range1 = n_out_range - n_out_range2 ;
DEBUG_VAL(n_in_range) ;
DEBUG_VAL(n_out_range1) ;
DEBUG_VAL(n_out_range2) ;
features_in.resize(n_in_range) ;
features_out1.resize(n_out_range1) ;
features_out2.resize(n_out_range2) ;
for ( int i = 0 ; i < total_features ; i++ )
{
if (in_range[i] == 1)
features_in[idx_in++] = concat[i] ;
else if (in_range[i] == 2 )
features_out2[idx_out2++] = concat[i] ;
else
features_out1[idx_out++] = concat[i] ;
}
        // free memory (dev_map_sizes and dev_n_out_range2 are not needed
        // past this point)
        CUDA_SAFE_CALL( cudaFree( dev_maps ) ) ;
        CUDA_SAFE_CALL( cudaFree( dev_in_range ) ) ;
        CUDA_SAFE_CALL( cudaFree( dev_n_in_range ) ) ;
        CUDA_SAFE_CALL( cudaFree( dev_n_out_range2 ) ) ;
        CUDA_SAFE_CALL( cudaFree( dev_map_sizes ) ) ;
        // prefix-sum (exclusive scan) the in-range map sizes to determine the
        // indexing offset of each particle's map
for ( int i = 1 ; i < n_particles+1 ; i++ )
{
map_offsets_in[i] = map_offsets_in[i-1] + n_in_range_vec[i-1] ;
map_offsets_out[i] = map_offsets_out[i-1] + n_in_range_vec[i-1] ;
}
}
/************************************************
*
* Prepare PHD update inputs
*
************************************************/
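    // Size of the updated map over all particles: each in-range feature
    // produces (n_measure+1) terms (one non-detection term plus one term per
    // measurement), and each particle additionally gets n_measure birth terms.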
int n_update = n_in_range*(n_measure+1) + n_measure*n_particles ;
// allocate device memory
CUDA_SAFE_CALL(
cudaMalloc( (void**)&dev_maps_inrange,
n_in_range*sizeof(GaussianType) ) ) ;
CUDA_SAFE_CALL(
cudaMalloc( (void**)&dev_map_offsets,
(n_particles+1)*sizeof(int) ) ) ;
CUDA_SAFE_CALL(
cudaMalloc((void**)&dev_maps_updated,
n_update*sizeof(GaussianType)) ) ;
CUDA_SAFE_CALL(
cudaMalloc((void**)&dev_merged_flags,
n_update*sizeof(bool)) ) ;
// copy inputs
CUDA_SAFE_CALL(
cudaMemcpy( dev_maps_inrange, &features_in[0],
n_in_range*sizeof(GaussianType),
cudaMemcpyHostToDevice )
) ;
CUDA_SAFE_CALL( cudaMemcpy( dev_map_offsets, &map_offsets_in[0],
(n_particles+1)*sizeof(int),
cudaMemcpyHostToDevice ) ) ;
}
template <class GaussianType>
/**
* @brief pruneMap Prune a gaussian mixture.
*
* The elements of dev_maps whose corresponding flag equals true are removed
* and the resulting array is written back into dev_maps. dev_merged_flags is
* also overwritten with an array of the appropriate number of false elements.
* map_sizes is overwritten with the sizes of the pruned maps
*
* @param dev_maps Device pointer to array of gaussian features
* @param dev_merged_flags Device array of boolean flags, true = prune.
* @param map_sizes Vector of map sizes
* @param n_gaussians Total number of gaussians.
* @return Total number of gaussians after pruning
*/
int
pruneMap(GaussianType*& dev_maps,
bool*& dev_merged_flags,
std::vector<int>& map_sizes,
int n_gaussians){
// wrap pointers in thrust types
thrust::device_ptr<GaussianType> ptr_maps(dev_maps) ;
thrust::device_ptr<bool> ptr_flags(dev_merged_flags) ;
// create the output vector, with same size as the input
thrust::device_vector<GaussianType> dev_pruned(n_gaussians) ;
// do the pruning
thrust::remove_copy_if(ptr_maps,ptr_maps+n_gaussians,
ptr_flags,
dev_pruned.begin(),
thrust::identity<bool>()) ;
// recalculate map sizes
int n_particles = map_sizes.size() ;
std::vector<int> map_sizes_pruned(n_particles,0) ;
host_vector<bool> flags(ptr_flags,ptr_flags+n_gaussians) ;
int n = 0 ;
int n_pruned = 0 ;
for ( int i = 0 ; i < n_particles ; i++){
for( int j = 0 ; j < map_sizes[i] ; j++){
if (!flags[n++]){
map_sizes_pruned[i]++ ;
n_pruned++ ;
}
}
}
// cout << "pruned features: " << endl ;
// for ( int i = 0 ; i < n_pruned ; i++ ){
// GaussianType g = dev_pruned[i] ;
// print_feature(g) ;
// }
// store pruned results
thrust::device_free(ptr_maps) ;
ptr_maps = thrust::device_malloc<GaussianType>(n_pruned) ;
thrust::copy_n(dev_pruned.begin(),n_pruned,ptr_maps) ;
dev_maps = raw_pointer_cast(ptr_maps) ;
thrust::device_free(ptr_flags) ;
ptr_flags = thrust::device_malloc<bool>(n_pruned) ;
thrust::fill(ptr_flags,ptr_flags+n_pruned,false) ;
dev_merged_flags = raw_pointer_cast(ptr_flags) ;
map_sizes = map_sizes_pruned ;
return n_pruned ;
}
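/**
 * mergeAndCopyMaps: post-update merging and host copy-back.
 *
 * Prunes flagged features from the updated in-range maps, concatenates the
 * result with the nearly-in-range features (features_out2) per particle,
 * runs phdUpdateMergeKernel on the combined maps, copies the merged maps back
 * to the host, and re-appends the untouched out-of-range features
 * (features_out1) to produce maps_output. The temporary device buffers,
 * including dev_maps_updated and dev_merged_flags, are freed on exit.
 */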
template <class GaussianType>
void
mergeAndCopyMaps(GaussianType*& dev_maps_updated,
bool*& dev_merged_flags,
vector<GaussianType> features_out1,
vector<GaussianType> features_out2,
vector<int> n_in_range_vec,
vector<int> n_out_range1_vec,
vector<int> n_out_range2_vec,
int n_particles, int n_measure, int n_update,
vector<vector<GaussianType> >& maps_output )
{
vector<int> map_offsets(n_particles+1) ;
size_t combined_size ;
GaussianType* maps_merged = NULL ;
int* map_sizes_merged = NULL ;
int offset = 0 ;
int offset_updated = 0 ;
int offset_out = 0 ;
// device variables
GaussianType* dev_maps_merged = NULL ;
GaussianType* dev_maps_combined = NULL ;
bool* dev_merged_flags_combined = NULL ;
int* dev_n_merged = NULL ;
int* dev_map_offsets = NULL ;
int n_out_range1 = features_out1.size() ;
int n_out_range2 = features_out2.size() ;
// prune low-weighted features
DEBUG_VAL(n_update) ;
vector<int> map_sizes_inrange(n_particles) ;
for ( int n = 0 ; n < n_particles ; n++){
map_sizes_inrange[n] = n_in_range_vec[n]*(n_measure+1) + n_measure ;
}
int n_pruned = pruneMap(dev_maps_updated,dev_merged_flags,
map_sizes_inrange,n_update) ;
DEBUG_VAL(n_pruned) ;
    // recombine the updated in-range map with the nearly-in-range map and do merging
DEBUG_MSG("Recombining maps") ;
combined_size = (n_pruned+n_out_range2)*sizeof(GaussianType) ;
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_maps_combined, combined_size ) ) ;
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_merged_flags_combined,
(n_pruned+n_out_range2)*sizeof(bool) ) ) ;
map_offsets[0] = 0 ;
for ( int n = 0 ; n < n_particles ; n++ )
{
// in-range map for particle n
int n_in_range_n = map_sizes_inrange[n] ;
CUDA_SAFE_CALL( cudaMemcpy( dev_maps_combined+offset,
dev_maps_updated+offset_updated,
n_in_range_n*sizeof(GaussianType),
cudaMemcpyDeviceToDevice) ) ;
CUDA_SAFE_CALL( cudaMemcpy( dev_merged_flags_combined+offset,
dev_merged_flags+offset_updated,
n_in_range_n*sizeof(bool)
,cudaMemcpyDeviceToDevice ) ) ;
offset += n_in_range_n ;
offset_updated += n_in_range_n ;
// nearly in range map for particle n
vector<char> merged_flags_out(n_out_range2_vec[n],0) ;
CUDA_SAFE_CALL( cudaMemcpy( dev_maps_combined+offset,
&features_out2[offset_out],
n_out_range2_vec[n]*sizeof(GaussianType),
cudaMemcpyHostToDevice ) ) ;
CUDA_SAFE_CALL( cudaMemcpy( dev_merged_flags_combined+offset,
&merged_flags_out[0],
n_out_range2_vec[n]*sizeof(bool),
cudaMemcpyHostToDevice) ) ;
offset += n_out_range2_vec[n] ;
offset_out += n_out_range2_vec[n] ;
map_offsets[n+1] = offset ;
}
DEBUG_VAL(combined_size) ;
CUDA_SAFE_CALL( cudaMalloc((void**)&dev_maps_merged,
combined_size ) ) ;
CUDA_SAFE_CALL( cudaMalloc((void**)&dev_n_merged,
n_particles*sizeof(int) ) ) ;
CUDA_SAFE_CALL( cudaMalloc((void**)&dev_map_offsets,
(n_particles+1)*sizeof(int) ) ) ;
CUDA_SAFE_CALL( cudaMemcpy( dev_map_offsets, &map_offsets[0],
(n_particles+1)*sizeof(int),
cudaMemcpyHostToDevice ) ) ;
CUDA_SAFE_THREAD_SYNC() ;
thrust::device_ptr<bool> ptr_flags(dev_merged_flags_combined) ;
thrust::fill(ptr_flags, ptr_flags+n_pruned+n_out_range2,false) ;
DEBUG_MSG("launching phdUpdateMergeKernel") ;
phdUpdateMergeKernel<<<n_particles,256>>>
( dev_maps_combined, dev_maps_merged, dev_n_merged,
dev_merged_flags_combined, dev_map_offsets, n_particles ) ;
CUDA_SAFE_THREAD_SYNC() ;
// // copy one feature and look at it
// GaussianType feature_test ;
// CUDA_SAFE_CALL(cudaMemcpy(&feature_test,dev_maps_merged,sizeof(GaussianType),cudaMemcpyDeviceToHost) ) ;
// cout << "first merged feature: " << endl ;
// print_feature(feature_test) ;
// allocate outputs
DEBUG_MSG("Allocating update and merge outputs") ;
maps_merged = (GaussianType*)malloc( combined_size ) ;
map_sizes_merged = (int*)malloc( n_particles*sizeof(int) ) ;
// copy outputs
CUDA_SAFE_CALL(
cudaMemcpy( maps_merged, dev_maps_merged,
combined_size,
cudaMemcpyDeviceToHost ) ) ;
CUDA_SAFE_CALL(
cudaMemcpy( map_sizes_merged, dev_n_merged,
n_particles*sizeof(int),
cudaMemcpyDeviceToHost ) ) ;
offset_updated = 0 ;
offset_out = 0 ;
for ( int i = 0 ; i < n_particles ; i++ )
{
offset_updated = map_offsets[i] ;
// DEBUG_VAL(map_sizes_merged[i]) ;
maps_output[i].assign(maps_merged+offset_updated,
maps_merged+offset_updated+map_sizes_merged[i]) ;
// recombine with out-of-range features, if any
if ( n_out_range1 > 0 && n_out_range1_vec[i] > 0 )
{
maps_output[i].insert( maps_output[i].end(),
features_out1.begin()+offset_out,
features_out1.begin()+offset_out+n_out_range1_vec[i] ) ;
offset_out += n_out_range1_vec[i] ;
}
// cout << "Merged map " << i << endl ;
// for ( int j = 0 ; j < maps_output[i].size() ; j++ ){
// print_feature(maps_output[i][j]) ;
// }
}
free(maps_merged) ;
free(map_sizes_merged) ;
CUDA_SAFE_CALL( cudaFree( dev_maps_combined ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_maps_merged ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_merged_flags_combined ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_n_merged ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_maps_updated) ) ;
CUDA_SAFE_CALL( cudaFree( dev_merged_flags) ) ;
}
SynthSLAM
phdUpdateSynth(SynthSLAM& particles, measurementSet measurements)
{
//------- Variable Declarations ---------//
int n_measure = 0 ;
int n_particles = particles.n_particles ;
DEBUG_VAL(n_particles) ;
vector<int> map_sizes_static(n_particles,0) ;
vector<int> map_sizes_dynamic(n_particles,0) ;
// map offsets
vector<int> map_offsets_in_static(n_particles+1,0) ;
vector<int> map_offsets_out_static(n_particles+1,0) ;
SynthSLAM particlesPreMerge(particles) ;
// device variables
ConstantVelocityState* dev_poses = NULL ;
int *dev_map_offsets_static = NULL ;
int *dev_map_offsets_dynamic = NULL ;
Gaussian2D* dev_maps_inrange_static = NULL ;
Gaussian4D* dev_maps_inrange_dynamic = NULL ;
Gaussian2D* dev_maps_updated_static = NULL ;
Gaussian4D* dev_maps_updated_dynamic = NULL ;
REAL* dev_particle_weights = NULL ;
bool* dev_merged_flags_static = NULL ;
bool* dev_merged_flags_dynamic = NULL ;
// in/out range book-keeping variables
vector<char> in_range ;
vector<int> n_in_range_vec_static(n_particles,0) ;
vector<int> n_in_range_vec_dynamic(n_particles,0) ;
vector<int> n_out_range1_vec_static(n_particles,0) ;
vector<int> n_out_range1_vec_dynamic(n_particles,0) ;
vector<int> n_out_range2_vec_static(n_particles,0) ;
vector<int> n_out_range2_vec_dynamic(n_particles,0) ;
vector<Gaussian2D> features_in_static ;
vector<Gaussian2D> features_out1_static ;
vector<Gaussian2D> features_out2_static ;
vector<Gaussian4D> features_in_dynamic ;
vector<Gaussian4D> features_out1_dynamic ;
vector<Gaussian4D> features_out2_dynamic ;
// output variables
//------- End Variable Declarations -----//
// make a copy of the particles
particlesPreMerge = particles ;
// check for memory limit for storing measurements in constant mem
n_measure = measurements.size() ;
if ( n_measure > 256 )
{
DEBUG_MSG("Warning: maximum number of measurements per time step exceeded") ;
n_measure = 256 ;
}
DEBUG_VAL(n_measure) ;
// copy measurements to device
CUDA_SAFE_CALL(
cudaMemcpyToSymbol( Z, &measurements[0],
n_measure*sizeof(RangeBearingMeasurement) ) ) ;
// copy particle poses to device
CUDA_SAFE_CALL(
cudaMalloc( (void**)&dev_poses,
n_particles*sizeof(ConstantVelocityState) ) ) ;
CUDA_SAFE_CALL(
cudaMemcpy(dev_poses,&particles.states[0],
n_particles*sizeof(ConstantVelocityState),
cudaMemcpyHostToDevice) ) ;
// extract in-range portions of maps, and allocate output arrays
if(config.featureModel==STATIC_MODEL
|| config.featureModel==MIXED_MODEL)
{
prepareUpdateInputs( particles.maps_static,
dev_poses, n_particles, n_measure,
dev_maps_inrange_static, dev_map_offsets_static,
dev_maps_updated_static, dev_merged_flags_static,
features_in_static, features_out1_static,
features_out2_static, n_in_range_vec_static,
n_out_range1_vec_static, n_out_range2_vec_static) ;
}
if(config.featureModel == DYNAMIC_MODEL
|| config.featureModel == MIXED_MODEL)
{
prepareUpdateInputs( particles.maps_dynamic,
dev_poses, n_particles, n_measure,
dev_maps_inrange_dynamic, dev_map_offsets_dynamic,
dev_maps_updated_dynamic, dev_merged_flags_dynamic,
features_in_dynamic, features_out1_dynamic,
features_out2_dynamic, n_in_range_vec_dynamic,
n_out_range1_vec_dynamic,n_out_range2_vec_dynamic) ;
}
// allocate arrays for particle weight update
CUDA_SAFE_CALL(
cudaMalloc((void**)&dev_particle_weights,
n_particles*sizeof(REAL) ) ) ;
// launch kernel
int nBlocks = min(n_particles,32768) ;
int n_update_static = features_in_static.size()*(n_measure+1)
+ n_measure*n_particles ;
int n_update_dynamic = features_in_dynamic.size()*(n_measure+1)
+ n_measure*n_particles ;
cudaPrintfInit(4194304) ;
if(config.featureModel == MIXED_MODEL)
{
DEBUG_MSG("launching phdUpdateKernelMixed") ;
phdUpdateKernelMixed<<<nBlocks,256>>>(
dev_poses, dev_maps_inrange_static, dev_maps_inrange_dynamic,
dev_map_offsets_static, dev_map_offsets_dynamic,
n_particles,n_measure,
dev_maps_updated_static, dev_maps_updated_dynamic,
dev_merged_flags_static, dev_merged_flags_dynamic,
dev_particle_weights);
CUDA_SAFE_THREAD_SYNC() ;
CUDA_SAFE_CALL( cudaFree( dev_maps_inrange_dynamic ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_map_offsets_dynamic ) ) ;
}
else if(config.featureModel==STATIC_MODEL)
{
DEBUG_MSG("Computing Birth terms") ;
int n_births = n_particles*n_measure ;
vector<Gaussian2D> births(n_births) ;
for ( int i = 0 ; i < n_particles ; i++){
ConstantVelocityState pose = particles.states[i] ;
for( int j = 0 ; j < n_measure ; j++){
int idx = i*n_measure + j ;
RangeBearingMeasurement z = measurements[j] ;
// invert measurement
REAL theta = pose.ptheta + z.bearing ;
REAL dx = z.range*cos(theta) ;
REAL dy = z.range*sin(theta) ;
births[idx].mean[0] = pose.px + dx ;
births[idx].mean[1] = pose.py + dy ;
// inverse measurement jacobian
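                // J is the 2x2 Jacobian of the inverse measurement function
                // (range,bearing) -> (x,y), stored column-major:
                //   dx/dr = cos(theta),           dy/dr = sin(theta),
                //   dx/dtheta = -r*sin(theta) = -dy,  dy/dtheta = r*cos(theta) = dx
                // so the birth covariance below is J*diag(var_range,var_bearing)*J'.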
REAL J[4] ;
J[0] = dx/z.range ;
J[1] = dy/z.range ;
J[2] = -dy ;
J[3] = dx ;
// measurement noise
REAL var_range = pow(config.stdRange*config.birthNoiseFactor,2) ;
REAL var_bearing = pow(config.stdBearing*config.birthNoiseFactor,2) ;
// compute birth covariance
births[idx].cov[0] = pow(J[0],2)*var_range +
pow(J[2],2)*var_bearing ;
births[idx].cov[1] = J[0]*J[1]*var_range +
J[2]*J[3]*var_bearing ;
births[idx].cov[2] =
births[idx].cov[1] ;
births[idx].cov[3] = pow(J[1],2)*var_range +
pow(J[3],2)*var_bearing ;
// set birth weight
if(z.label==STATIC_MEASUREMENT || !config.labeledMeasurements)
births[idx].weight = safeLog(config.birthWeight) ;
else
births[idx].weight = safeLog(0) ;
// print_feature(births[idx]) ;
}
}
Gaussian2D* dev_births = NULL ;
CUDA_SAFE_CALL(cudaMalloc(
(void**)&dev_births,
n_births*sizeof(Gaussian2D))) ;
CUDA_SAFE_CALL(cudaMemcpy(
dev_births,&births[0],
n_births*sizeof(Gaussian2D),
cudaMemcpyHostToDevice)) ;
DEBUG_MSG("Computing PHD preupdate") ;
// allocate device memory for pre-updated features
int n_features_total = features_in_static.size() ;
int n_preupdate = n_features_total*n_measure ;
DEBUG_VAL(n_preupdate) ;
Gaussian2D* dev_features_preupdate = NULL ;
CUDA_SAFE_CALL(cudaMalloc((void**)&dev_features_preupdate,
n_preupdate*sizeof(Gaussian2D))) ;
// create pose index vector
vector<int> pose_idx ;
for (int i = 0 ; i < n_particles ; i++){
pose_idx.insert(pose_idx.end(),n_in_range_vec_static[i],i) ;
}
// for ( int i = 0 ; i < pose_idx.size() ; i++){
// DEBUG_VAL(pose_idx[i]) ;
// }
int* dev_pose_idx = NULL ;
CUDA_SAFE_CALL(cudaMalloc((void**)&dev_pose_idx,
n_features_total*sizeof(int))) ;
CUDA_SAFE_CALL(cudaMemcpy(dev_pose_idx,&pose_idx[0],
n_features_total*sizeof(int),
cudaMemcpyHostToDevice)) ;
// create pd vector
REAL* dev_features_pd = NULL ;
CUDA_SAFE_CALL(cudaMalloc((void**)&dev_features_pd,
n_features_total*sizeof(REAL))) ;
// call the preupdate kernel
nBlocks = min(int(ceil(n_features_total/256.0)),65535) ;
DEBUG_VAL(nBlocks) ;
preUpdateSynthKernel<<<nBlocks,256>>>(
dev_poses,dev_pose_idx,dev_maps_inrange_static,
dev_features_pd,n_features_total,
n_measure,dev_features_preupdate) ;
CUDA_SAFE_THREAD_SYNC() ;
// // check preupdate terms
// thrust::device_ptr<Gaussian2D> ptr_preupdate(dev_features_preupdate) ;
// thrust::device_vector<Gaussian2D> dev_preupdate(ptr_preupdate,ptr_preupdate+n_preupdate) ;
// thrust::host_vector<Gaussian2D> preupdate(dev_preupdate) ;
// for ( int i = 0 ; i < preupdate.size() ; i++){
// Gaussian2D g = preupdate[i] ;
// print_feature(g) ;
// }
DEBUG_MSG("launching phdUpdateKernel Static") ;
nBlocks = min(n_particles,65535) ;
phdUpdateKernel<<<nBlocks,256>>>(
dev_maps_inrange_static, dev_features_pd, dev_features_preupdate,
dev_births, dev_map_offsets_static,n_particles,n_measure,
dev_maps_updated_static,dev_merged_flags_static,
dev_particle_weights ) ;
CUDA_SAFE_THREAD_SYNC() ;
cudaFree(dev_births) ;
cudaFree(dev_pose_idx) ;
cudaFree(dev_features_preupdate) ;
cudaFree(dev_features_pd) ;
// // check update terms
// thrust::device_ptr<Gaussian2D> ptr_update(dev_maps_updated_static) ;
// thrust::host_vector<Gaussian2D> update(ptr_update, ptr_update+n_preupdate+n_births+n_features_total) ;
// for ( int i = 0 ; i < update.size() ; i++)
// print_feature(update[i]) ;
}
else if(config.featureModel==DYNAMIC_MODEL)
{
DEBUG_MSG("launching phdUpdateKernel Dynamic") ;
// phdUpdateKernel<<<nBlocks,256>>>(
// dev_poses, dev_maps_inrange_dynamic,dev_map_offsets_dynamic,
// n_particles,n_measure,dev_maps_updated_dynamic,
// dev_merged_flags_dynamic,dev_particle_weights) ;
CUDA_SAFE_THREAD_SYNC() ;
}
cudaPrintfDisplay(stdout,false) ;
cudaPrintfEnd();
CUDA_SAFE_CALL( cudaFree( dev_maps_inrange_static ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_map_offsets_static ) ) ;
// // check input weights against merge flags
// cout << "DEBUG first updated dynamic feature" << endl ;
// bool* merged_flags = (bool*)malloc(n_update_dynamic*sizeof(bool)) ;
// Gaussian4D* maps_updated = (Gaussian4D*)malloc( n_update_dynamic*sizeof(Gaussian4D) ) ;
// cudaMemcpy( merged_flags, dev_merged_flags_dynamic, n_update_dynamic*sizeof(bool),cudaMemcpyDeviceToHost) ;
// CUDA_SAFE_CALL(
// cudaMemcpy( maps_updated, dev_maps_updated_dynamic,
// n_update_dynamic*sizeof(Gaussian4D),
// cudaMemcpyDeviceToHost ) ) ;
// for (int j = 0 ; j < n_update_dynamic ; j++)
// {
// cout << "(" << maps_updated[j].weight << " | " << merged_flags[j] << ")" << endl ;
// }
// print_feature(maps_updated[0]) ;
// print_feature(maps_updated[1]) ;
// free(maps_updated) ;
// free(merged_flags) ;
/******************************************************
*
* Merge updated maps and copy back to host
*
******************************************************/
if(config.featureModel==STATIC_MODEL || config.featureModel==MIXED_MODEL)
{
mergeAndCopyMaps( dev_maps_updated_static,dev_merged_flags_static,
features_out1_static,
features_out2_static, n_in_range_vec_static,
n_out_range1_vec_static,
n_out_range2_vec_static, n_particles,
n_measure,n_update_static, particles.maps_static ) ;
}
if(config.featureModel==DYNAMIC_MODEL || config.featureModel==MIXED_MODEL)
{
mergeAndCopyMaps( dev_maps_updated_dynamic,dev_merged_flags_dynamic,
features_out1_dynamic,
features_out2_dynamic, n_in_range_vec_dynamic,
n_out_range1_vec_dynamic,
n_out_range2_vec_dynamic, n_particles,
n_measure,n_update_dynamic,particles.maps_dynamic ) ;
}
/**********************************************************
*
* Update particle weights
*
*********************************************************/
DEBUG_MSG("Updating Particle Weights") ;
REAL* particle_weights = (REAL*)malloc(n_particles*sizeof(REAL)) ;
CUDA_SAFE_CALL( cudaMemcpy(particle_weights,dev_particle_weights,
n_particles*sizeof(REAL),
cudaMemcpyDeviceToHost ) ) ;
// multiply weights by multi-object likelihood
for ( int i = 0 ; i < n_particles ; i++ )
{
particles.weights[i] += particle_weights[i] ;
}
// normalize
REAL weightSum = logSumExp(particles.weights) ;
DEBUG_VAL(weightSum) ;
for (int i = 0 ; i < n_particles ; i++ )
{
particles.weights[i] -= weightSum ;
// DEBUG_VAL(particles.weights[i]) ;
}
// free memory
CUDA_SAFE_CALL( cudaFree( dev_particle_weights ) ) ;
free(particle_weights) ;
CUDA_SAFE_CALL( cudaFree( dev_poses ) ) ;
return particlesPreMerge ;
}
//SmcPhdSLAM
//phdUpdate(SmcPhdSLAM& slam, measurementSet measurements)
//{
// SmcPhdStatic maps_static_concat ;
// SmcPhdDynamic maps_dynamic_concat ;
// vector<int> map_sizes_static ;
// vector<int> map_sizes_dynamic ;
// // count map sizes
// int n_particles = slam.n_particles ;
// for (int n = 0 ; n < n_particles ; n++ )
// {
// map_sizes_static.push_back(slam.maps_static[n].x.size());
// map_sizes_dynamic.push_back(slam.maps_dynamic[n].x.size());
// }
//}
template <class GaussianType>
vector<GaussianType> computeExpectedMap(vector<vector <GaussianType> > maps,
vector<REAL> weights)
// concatenate all particle maps into a single slam particle and then call the
// existing gaussian pruning algorithm
{
DEBUG_MSG("Computing Expected Map") ;
vector<GaussianType> concat ;
int n_particles = maps.size() ;
int* merged_sizes = (int*)malloc(n_particles*sizeof(int)) ;
int* map_sizes = (int*)malloc(n_particles*sizeof(int)) ;
int total_features = 0 ;
for ( int n = 0 ; n < n_particles ; n++ )
{
vector<GaussianType> map = maps[n] ;
for ( int i = 0 ; i < map.size() ; i++ )
map[i].weight *= exp(weights[n]) ;
concat.insert( concat.end(), map.begin(), map.end() ) ;
merged_sizes[n] = map.size() ;
total_features += map.size() ;
}
if ( total_features == 0 )
{
DEBUG_MSG("no features") ;
vector<GaussianType> expected_map(0) ;
return expected_map ;
}
GaussianType* all_features = (GaussianType*)malloc( total_features*sizeof(GaussianType) ) ;
std::copy( concat.begin(), concat.end(), all_features ) ;
    bool* merged_flags = (bool*)malloc( total_features*sizeof(bool) ) ;
std::fill( merged_flags, merged_flags+total_features, false ) ;
GaussianType* maps_out = (GaussianType*)malloc( total_features*sizeof(GaussianType) ) ;
GaussianType* dev_maps_in = NULL ;
GaussianType* dev_maps_out = NULL ;
int* dev_merged_sizes = NULL ;
bool* dev_merged_flags = NULL ;
int* dev_map_sizes = NULL ;
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_maps_in,
total_features*sizeof(GaussianType) ) ) ;
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_maps_out,
total_features*sizeof(GaussianType) ) ) ;
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_merged_sizes,
n_particles*sizeof(int) ) ) ;
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_map_sizes,
n_particles*sizeof(int) ) ) ;
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_merged_flags,
total_features*sizeof(bool) ) ) ;
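    // Merge the particle maps pairwise in a reduction tree: at each level,
    // adjacent pairs of maps (2i, 2i+1) are treated as one concatenated map
    // and passed to phdUpdateMergeKernel, halving the number of maps per pass
    // until a single merged map remains (the halving assumes an even number
    // of maps at each level).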
for ( int n = n_particles/2 ; n > 0 ; n >>= 1 )
{
DEBUG_VAL(n) ;
for ( int i = 0 ; i < n ; i++ )
map_sizes[i] = merged_sizes[2*i] + merged_sizes[2*i+1] ;
CUDA_SAFE_CALL( cudaMemcpy( dev_map_sizes, map_sizes,
n*sizeof(int),
cudaMemcpyHostToDevice ) ) ;
CUDA_SAFE_CALL( cudaMemcpy( dev_maps_in, all_features,
total_features*sizeof(GaussianType),
cudaMemcpyHostToDevice) ) ;
CUDA_SAFE_CALL( cudaMemcpy( dev_merged_flags, merged_flags,
total_features*sizeof(bool),
cudaMemcpyHostToDevice)) ;
// kernel launch
phdUpdateMergeKernel<<<n,256>>>(
dev_maps_in, dev_maps_out, dev_merged_sizes,
dev_merged_flags, dev_map_sizes, n ) ;
CUDA_SAFE_CALL( cudaMemcpy( maps_out, dev_maps_out,
total_features*sizeof(GaussianType),
cudaMemcpyDeviceToHost) ) ;
CUDA_SAFE_CALL( cudaMemcpy( merged_sizes, dev_merged_sizes,
n*sizeof(int), cudaMemcpyDeviceToHost ) ) ;
int offset_in = 0 ;
int offset_out = 0 ;
for ( int i = 0 ; i < n ; i++ )
{
int n_copy = merged_sizes[i] ;
std::copy( maps_out+offset_out, maps_out+offset_out+n_copy,
all_features+offset_in) ;
offset_out += map_sizes[i] ;
offset_in += n_copy ;
}
total_features = offset_in ;
}
vector<GaussianType> expected_map(total_features) ;
std::copy( all_features,all_features+total_features, expected_map.begin() ) ;
CUDA_SAFE_CALL( cudaFree( dev_maps_in ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_maps_out ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_merged_sizes ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_merged_flags ) ) ;
CUDA_SAFE_CALL( cudaFree( dev_map_sizes ) ) ;
free(all_features) ;
free(merged_flags) ;
free(maps_out) ;
return expected_map ;
}
template<class GaussianType>
bool expectedFeaturesPredicate( GaussianType g )
{
return (g.weight <= config.minExpectedFeatureWeight) ;
}
/// copy the configuration structure to constant device memory
void
setDeviceConfig( const SlamConfig& config )
{
CUDA_SAFE_CALL(cudaMemcpyToSymbol( dev_config, &config, sizeof(SlamConfig) ) ) ;
// seed_rng();
}
///////////////////////////////////////////////////////////////////
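/// Rotate (and, for points, translate) a vector from the camera frame into
/// the world frame using the camera's roll/pitch/yaw. With isPoint = false
/// only the rotation is applied, which is used for transforming offsets and
/// velocities.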
__host__ __device__ void
transformCameraToWorld(REAL xCamera, REAL yCamera, REAL zCamera,
CameraState cam,
REAL& xWorld, REAL& yWorld, REAL& zWorld,
bool isPoint=true){
REAL croll = cos(cam.pose.proll) ;
REAL cpitch = cos(cam.pose.ppitch) ;
REAL cyaw = cos(cam.pose.pyaw) ;
REAL sroll = sin(cam.pose.proll) ;
REAL spitch = sin(cam.pose.ppitch) ;
REAL syaw = sin(cam.pose.pyaw) ;
xWorld = xCamera*(cpitch*cyaw) +
yCamera*(croll*syaw + sroll*spitch*cyaw) +
zCamera*(sroll*syaw - croll*spitch*cyaw) ;
yWorld = xCamera*(-cpitch*syaw) +
yCamera*(croll*cyaw - sroll*spitch*syaw) +
zCamera*(sroll*cyaw + croll*spitch*syaw) ;
zWorld = xCamera*(spitch) +
yCamera*(-sroll*cpitch) +
zCamera*(croll*cpitch) ;
if(isPoint){
xWorld += cam.pose.px ;
yWorld += cam.pose.py ;
zWorld += cam.pose.pz ;
}
}
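/// Inverse of transformCameraToWorld: applies the transposed rotation and,
/// for points, subtracts the (rotated) camera position.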
__host__ __device__ void
transformWorldToCamera(REAL xWorld, REAL yWorld, REAL zWorld,
CameraState cam,
REAL& xCamera, REAL& yCamera, REAL& zCamera,
bool isPoint=true){
REAL croll = cos(cam.pose.proll) ;
REAL cpitch = cos(cam.pose.ppitch) ;
REAL cyaw = cos(cam.pose.pyaw) ;
REAL sroll = sin(cam.pose.proll) ;
REAL spitch = sin(cam.pose.ppitch) ;
REAL syaw = sin(cam.pose.pyaw) ;
xCamera = xWorld*(cpitch*cyaw) +
yWorld*(-cpitch*syaw) +
zWorld*(spitch) ;
yCamera = xWorld*(croll*syaw + sroll*spitch*cyaw) +
yWorld*(croll*cyaw - sroll*spitch*syaw) +
zWorld*(-sroll*cpitch) ;
zCamera = (xWorld)*(sroll*syaw - croll*spitch*cyaw) +
(yWorld)*(sroll*cyaw + croll*spitch*syaw) +
(zWorld)*(croll*cpitch) ;
if(isPoint){
xCamera += -cam.pose.px*(cpitch*cyaw) -
cam.pose.py*(-cpitch*syaw) -
cam.pose.pz*spitch ;
yCamera += -cam.pose.px*(croll*syaw + sroll*spitch*cyaw) -
cam.pose.py*(croll*cyaw - sroll*spitch*syaw) -
cam.pose.pz*(-sroll*cpitch) ;
zCamera += -cam.pose.px*(sroll*syaw - croll*spitch*cyaw) -
cam.pose.py*(sroll*cyaw + croll*spitch*syaw) -
cam.pose.pz*(croll*cpitch) ;
}
}
/// functor for use with thrust::transform to convert particles in euclidean
/// space to particles in disparity space (baseline = 1)
/** pass a vector of camera states to the constructor
* the argument to the functor is an 8-element tuple, where each element is a
* vector with one element per feature particle. The first four elements are
inputs:
* idx: index to the vector of camera states indicating to which camera
this particle belongs
x: x-coordinate of the particle
y: y-coordinate of the particle
z: z-coordinate of the particle
The last 4 elements are outputs computed by the functor:
u: u-coordinate of particle in disparity space
v: v-coordinate of particle in disparity space
d: disparity value of particle in disparity space
     in_range: 1 if the particle is visible to the camera, 0 otherwise
**/
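/// Pinhole projection used below (unit baseline):
///   u = u0 - fx*x_cam/z_cam,  v = v0 - fy*y_cam/z_cam,  d = -fx/z_cam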
struct world_to_disparity_transform{
const CameraState* camera_states ;
world_to_disparity_transform(CameraState* _states) : camera_states(_states) {}
template <typename Tuple>
__host__ __device__ void
operator()(Tuple t){
using namespace thrust ;
CameraState cam = camera_states[get<0>(t)] ;
REAL x = get<1>(t) ;
REAL y = get<2>(t) ;
REAL z = get<3>(t) ;
REAL xCamera = 0 ;
REAL yCamera = 0 ;
REAL zCamera = 0 ;
transformWorldToCamera(x,y,z,cam,xCamera,yCamera,zCamera) ;
get<4>(t) = cam.u0 - cam.fx*xCamera/zCamera ;
get<5>(t) = cam.v0 - cam.fy*yCamera/zCamera ;
get<6>(t) = -cam.fx/zCamera ;
bool in_fov = (get<4>(t) > 0) &&
(get<4>(t) < dev_config.imageWidth) &&
(get<5>(t) > 0) &&
(get<5>(t) < dev_config.imageHeight) &&
(get<6>(t) >= 0);
get<7>(t) = in_fov ? 1 : 0 ;
}
};
/// functor for use with thrust::transform to convert particles in disparity
/// space to particles in euclidean space (baseline = 1)
/** pass a vector of camera states to the constructor
 * the argument to the functor is a 7-element tuple, where each element is a
* vector with one element per feature particle. The first four elements are
inputs:
* idx: index to the vector of camera states indicating to which camera
this particle belongs
u: u-coordinate of particle in disparity space
v: v-coordinate of particle in disparity space
d: disparity value of particle in disparity space
The last 3 elements are outputs computed by the functor:
x: x-coordinate of the particle
y: y-coordinate of the particle
z: z-coordinate of the particle
**/
struct disparity_to_world_transform{
const CameraState* camera_states ;
disparity_to_world_transform(CameraState* _states) : camera_states(_states) {}
template <typename Tuple>
__host__ __device__ void
operator()(Tuple t){
CameraState cam = camera_states[get<0>(t)] ;
REAL u = get<1>(t) ;
REAL v = get<2>(t) ;
REAL d = get<3>(t) ;
REAL xCamera = (u-cam.u0)/d ;
REAL yCamera = cam.fx/cam.fy*(v-cam.v0)/d ;
REAL zCamera = -cam.fx/d ;
transformCameraToWorld(xCamera,yCamera,zCamera,cam,
get<4>(t),get<5>(t),get<6>(t)) ;
}
};
/// this is a binary function which returns the sum of two numerical values
/// divided by an integer N. This can be used to compute the arithmetic mean of
/// N numbers by reduction.
struct compute_mean_function{
const int N ;
compute_mean_function(int _n) : N(_n) {}
template <typename T>
__host__ __device__ REAL
operator()(T x, T y){
return (REAL)(x+y)/(REAL)N ;
}
};
/// unary operator which multiplies the argument by a constant
template <typename T>
struct multiply_by : public thrust::unary_function<T,T>
{
const T N ;
multiply_by(T _n) : N(_n) {}
__host__ __device__ T
operator()(T x){return x*N ;}
};
/// unary operator which divides the argument by a constant
template <typename T>
struct divide_by : public thrust::unary_function<T,T>
{
const T N ;
divide_by(T _n) : N(_n) {}
__host__ __device__ T
operator()(T x){return x/N ;}
};
/// unary operator which returns the weight of a gaussian object
template <typename T>
struct get_weight : public thrust::unary_function<T,REAL>
{
__device__ REAL
operator()(T g){ return g.weight; }
} ;
struct gt0 : public thrust::unary_function<REAL,bool>
{
__host__ __device__ bool
operator()(const REAL x){return (x>0);}
} ;
struct leq0 : public thrust::unary_function<REAL,bool>
{
__host__ __device__ bool
operator()(const REAL x){return (x<=0);}
} ;
// create predicate for testing feature visibility
struct is_inrange : public thrust::unary_function<Gaussian3D,bool>
{
__host__ __device__ bool
operator()(const Gaussian3D g){
REAL u = g.mean[0] ;
REAL v = g.mean[1] ;
REAL d = g.mean[2] ;
bool in_fov = (u > 0) &&
(u <= dev_config.imageWidth) &&
(v >= 0) &&
(v <= dev_config.imageHeight) &&
(d >= 0);
return in_fov ;
}
};
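/// Fit one 3D Gaussian per feature from its disparity-space particles.
/// Each block handles one feature: shared-memory reductions compute the
/// sample mean of (u,v,d) and the unbiased sample covariance (divided by
/// nParticles-1), which are written into the symmetric 3x3 cov array of the
/// output Gaussian.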
__global__ void
fitGaussiansKernel(REAL* uArray, REAL* vArray, REAL* dArray,
REAL* weights,int nGaussians,
Gaussian3D* gaussians){
int tid = threadIdx.x ;
__shared__ REAL sdata[256] ;
for (int i = blockIdx.x ; i < nGaussians ; i+=gridDim.x){
int nParticles = dev_config.particlesPerFeature ;
int offset = i*nParticles ;
REAL val = 0 ;
// compute mean u
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += uArray[offset+j] ;
}
sumByReduction(sdata,val,tid);
REAL uMean = sdata[0]/nParticles ;
__syncthreads() ;
// compute mean v
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += vArray[offset+j] ;
}
sumByReduction(sdata,val,tid);
REAL vMean = sdata[0]/nParticles ;
__syncthreads() ;
// compute mean d
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += dArray[offset+j] ;
}
sumByReduction(sdata,val,tid);
REAL dMean = sdata[0]/nParticles ;
__syncthreads() ;
// write means to output
if (tid == 0){
// cuPrintf("%f %f %f\n",uMean,vMean,dMean) ;
gaussians[i].weight = weights[i] ;
gaussians[i].mean[0] = uMean ;
gaussians[i].mean[1] = vMean ;
gaussians[i].mean[2] = dMean ;
}
// covariance term u-u
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += pow(uArray[offset+j]-uMean,2) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0)
gaussians[i].cov[0] = sdata[0]/(nParticles-1) ;
__syncthreads() ;
// covariance term v-v
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += pow(vArray[offset+j]-vMean,2) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0)
gaussians[i].cov[4] = sdata[0]/(nParticles-1) ;
__syncthreads() ;
// covariance term d-d
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += pow(dArray[offset+j]-dMean,2) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0)
gaussians[i].cov[8] = sdata[0]/(nParticles-1) ;
__syncthreads() ;
// covariance term u-v
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += (uArray[offset+j]-uMean)*(vArray[offset+j]-vMean) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0){
gaussians[i].cov[1] = sdata[0]/(nParticles-1) ;
gaussians[i].cov[3] = gaussians[i].cov[1] ;
}
__syncthreads() ;
// covariance term u-d
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += (uArray[offset+j]-uMean)*(dArray[offset+j]-dMean) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0){
gaussians[i].cov[2] = sdata[0]/(nParticles-1) ;
gaussians[i].cov[6] = gaussians[i].cov[2] ;
}
__syncthreads() ;
// covariance term v-d
val = 0 ;
for(int j = tid ; j < nParticles ; j+=blockDim.x){
val += (vArray[offset+j]-vMean)*(dArray[offset+j]-dMean) ;
}
sumByReduction(sdata,val,tid);
if (tid == 0){
gaussians[i].cov[5] = sdata[0]/(nParticles-1) ;
gaussians[i].cov[7] = gaussians[i].cov[5] ;
}
__syncthreads() ;
}
}
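/// Draw dev_config.particlesPerFeature samples from each fitted Gaussian.
/// Each thread owns one particle index and loops over all Gaussians, drawing
/// a correlated sample as mean + L*z, where L is the lower Cholesky factor of
/// the covariance and z is standard normal (Box-Muller). Samples are written
/// as three stacked blocks of length particlesPerFeature*n_gaussians
/// (u, then v, then d).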
__global__ void
sampleGaussiansKernel(Gaussian3D* gaussians, int n_gaussians,
RngState* seeds,REAL* samples){
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
if (tid < dev_config.particlesPerFeature){
// initialize this thread's random number generator
RngState random_state = seeds[tid] ;
float x1,x2,x3,x_extra ;
float2 randnorms ;
bool odd_iteration = false ;
int idx_result = tid ;
int step = dev_config.particlesPerFeature*n_gaussians ;
// loop over gaussians
for (int n = 0 ; n < n_gaussians ; n++){
// cholesky decomposition of covariance matrix
REAL L11 = sqrt(gaussians[n].cov[0]) ;
REAL L21 = gaussians[n].cov[1]/L11 ;
REAL L22 = sqrt(gaussians[n].cov[4]-pow(L21,2)) ;
REAL L31 = gaussians[n].cov[2]/L11 ;
REAL L32 = (gaussians[n].cov[5]-L31*L21)/L22 ;
REAL L33 = sqrt(gaussians[n].cov[8] - pow(L31,2) - pow(L32,2)) ;
// generate uncorrelated normally distributed random values
randnorms = randn(random_state) ;
x1 = randnorms.x ;
x2 = randnorms.y ;
            // the Box-Muller transform gives us normal variates two at a time,
// but we only need 3, so on even iterations, we call the transform
// twice and save the extra value to use in the next iteration.
if ( !odd_iteration ){
randnorms = randn(random_state) ;
x3 = randnorms.x ;
x_extra = randnorms.y ;
odd_iteration = true ;
}
else
{
x3 = x_extra ;
odd_iteration = false ;
}
// multiply uncorrelated values by cholesky decomposition and add
// mean
samples[idx_result] = x1*L11 + gaussians[n].mean[0] ;
samples[idx_result+step] = x1*L21 + x2*L22 + gaussians[n].mean[1] ;
samples[idx_result+2*step] = x1*L31 + x2*L32 + x3*L33 + gaussians[n].mean[2] ;
idx_result += dev_config.particlesPerFeature ;
}
}
}
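/// PHD pre-update in disparity space. For each predicted feature this
/// computes the innovation covariance S = H*P*H' + R for the pixel
/// measurement (H = [I2 0], R = diag(stdU^2, stdV^2)), the Kalman gain K,
/// and an updated covariance (the Maple-generated Joseph-form block); then,
/// for each measurement, it writes a pre-updated Gaussian whose log-weight is
/// log(pd) + log(w) - 0.5*dist - log(2*pi) - 0.5*log|S|.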
__global__ void
preUpdateDisparityKernel(Gaussian3D* features_predict,
REAL* features_pd,
int n_features,
ImageMeasurement* Z, int n_measure,
Gaussian3D* features_preupdate){
int tid = blockIdx.x*blockDim.x + threadIdx.x ;
for ( int i = tid ; i < n_features ; i+=gridDim.x*blockDim.x){
Gaussian3D feature = features_predict[i] ;
REAL pd = features_pd[i] ;
// innovation covariance
REAL sigma[4] ;
REAL sigma_inv[4] ;
REAL varU = pow(dev_config.stdU,2) ;
REAL varV = pow(dev_config.stdV,2) ;
sigma[0] = feature.cov[0] + varU ;
sigma[1] = feature.cov[1] ;
sigma[2] = feature.cov[3] ;
sigma[3] = feature.cov[4] + varV ;
invert_matrix2(sigma,sigma_inv) ;
REAL det_sigma = sigma[0]*sigma[3] - sigma[1]*sigma[2] ;
REAL K[6] ;
K[0] = feature.cov[0]*sigma_inv[0] + feature.cov[3]*sigma_inv[1] ;
K[1] = feature.cov[1]*sigma_inv[0] + feature.cov[4]*sigma_inv[1] ;
K[2] = feature.cov[2]*sigma_inv[0] + feature.cov[5]*sigma_inv[1] ;
K[3] = feature.cov[0]*sigma_inv[2] + feature.cov[3]*sigma_inv[3] ;
K[4] = feature.cov[1]*sigma_inv[2] + feature.cov[4]*sigma_inv[3] ;
K[5] = feature.cov[2]*sigma_inv[2] + feature.cov[5]*sigma_inv[3] ;
        // Maple-generated code for the Joseph-form update P = (I-KH)*P*(I-KH)' + K*R*K'
REAL cov_preupdate[9] ;
cov_preupdate[0] = (1 - K[0]) * (feature.cov[0] * (1 - K[0]) - feature.cov[3] * K[3]) - K[3] * (feature.cov[1] * (1 - K[0]) - feature.cov[4] * K[3]) + varU * pow( K[0], 2) + varV * pow( K[3], 2);
cov_preupdate[1] = -K[1] * (feature.cov[0] * (1 - K[0]) - feature.cov[3] * K[3]) + (1 - K[4]) * (feature.cov[1] * (1 - K[0]) - feature.cov[4] * K[3]) + K[0] * varU * K[1] + K[3] * varV * K[4];
cov_preupdate[2] = -K[2] * (feature.cov[0] * (1 - K[0]) - feature.cov[3] * K[3]) - K[5] * (feature.cov[1] * (1 - K[0]) - feature.cov[4] * K[3]) + feature.cov[2] * (1 - K[0]) - feature.cov[5] * K[3] + K[0] * varU * K[2] + K[3] * varV * K[5];
cov_preupdate[3] = (1 - K[0]) * (-feature.cov[0] * K[1] + feature.cov[3] * (1 - K[4])) - K[3] * (-feature.cov[1] * K[1] + feature.cov[4] * (1 - K[4])) + K[0] * varU * K[1] + K[3] * varV * K[4];
cov_preupdate[4] = -K[1] * (-feature.cov[0] * K[1] + feature.cov[3] * (1 - K[4])) + (1 - K[4]) * (-feature.cov[1] * K[1] + feature.cov[4] * (1 - K[4])) + varU * pow( K[1], 2) + varV * pow( K[4], 2);
cov_preupdate[5] = -K[2] * (-feature.cov[0] * K[1] + feature.cov[3] * (1 - K[4])) - K[5] * (-feature.cov[1] * K[1] + feature.cov[4] * (1 - K[4])) - feature.cov[2] * K[1] + feature.cov[5] * (1 - K[4]) + K[1] * varU * K[2] + K[4] * varV * K[5];
cov_preupdate[6] = (1 - K[0]) * (-feature.cov[0] * K[2] - feature.cov[3] * K[5] + feature.cov[6]) - K[3] * (-feature.cov[1] * K[2] - feature.cov[4] * K[5] + feature.cov[7]) + K[0] * varU * K[2] + K[3] * varV * K[5];
cov_preupdate[7] = -K[1] * (-feature.cov[0] * K[2] - feature.cov[3] * K[5] + feature.cov[6]) + (1 - K[4]) * (-feature.cov[1] * K[2] - feature.cov[4] * K[5] + feature.cov[7]) + K[1] * varU * K[2] + K[4] * varV * K[5];
cov_preupdate[8] = -K[2] * (-feature.cov[0] * K[2] - feature.cov[3] * K[5] + feature.cov[6]) - K[5] * (-feature.cov[1] * K[2] - feature.cov[4] * K[5] + feature.cov[7]) - feature.cov[2] * K[2] - feature.cov[5] * K[5] + feature.cov[8] + varU * pow( K[2], 2) + varV * pow( K[5], 2);
// end maple code
for ( int m = 0 ; m < n_measure ; m++){
int preupdate_idx = m*n_features + i ;
REAL innov[2] ;
innov[0] = Z[m].u - feature.mean[0] ;
innov[1] = Z[m].v - feature.mean[1] ;
REAL dist = innov[0]*innov[0]*sigma_inv[0] +
innov[0]*innov[1]*(sigma_inv[1]+sigma_inv[2]) +
innov[1]*innov[1]*sigma_inv[3] ;
REAL log_weight = safeLog(pd) + safeLog(feature.weight)
- 0.5*dist - safeLog(2*M_PI) - 0.5*safeLog(det_sigma) ;
features_preupdate[preupdate_idx].weight = log_weight ;
features_preupdate[preupdate_idx].mean[0] = feature.mean[0] +
innov[0]*K[0] + innov[1]*K[3] ;
features_preupdate[preupdate_idx].mean[1] = feature.mean[1] +
innov[0]*K[1] + innov[1]*K[4] ;
features_preupdate[preupdate_idx].mean[2] = feature.mean[2] +
innov[0]*K[2] + innov[1]*K[5] ;
for ( int n = 0 ; n < 9 ; n++ )
features_preupdate[preupdate_idx].cov[n] = cov_preupdate[n] ;
}
}
}
/**
* @brief separateDisparityFeatures Separate features into in-range and out-of-range parts
* @param features_all[in] vector of disparity space gaussian features
* @param offsets_all[in] indexing offsets to \p features_all
* @param particles_all[in] vector of ParticleMaps corresponding to \p features_all
 * @param features_in[out] vector of in-range disparity space gaussian features
 * @param offsets_in[out] indexing offsets to \p features_in
* @param particles_out[out] vector of ParticleMaps containing the 3D particles
* for out-of-range features
*/
void separateDisparityFeatures(device_vector<Gaussian3D> features_all,
host_vector<int> offsets_all,
vector<ParticleMap> particles_all,
device_vector<Gaussian3D>& features_in,
host_vector<int>& offsets_in,
vector<ParticleMap>& particles_out)
{
// make sure the output arrays are of sufficient size
features_in.resize(features_all.size());
offsets_in.resize(offsets_all.size());
particles_out.resize(particles_all.size());
// initialize the out-of-range particles to be empty
for (int n = 0 ; n < particles_out.size() ; n++){
particles_out[n].weights.clear();
particles_out[n].x.clear();
particles_out[n].y.clear();
particles_out[n].z.clear();
}
// compute the in-range mask
device_vector<bool> dev_inrange_mask(features_all.size()) ;
DEBUG_MSG("transform") ;
thrust::transform(features_all.begin(),features_all.end(),
dev_inrange_mask.begin(),is_inrange()) ;
host_vector<bool> inrange_mask = dev_inrange_mask ;
// do the separation
DEBUG_MSG("copy_if") ;
thrust::copy_if(features_all.begin(),features_all.end(),
features_in.begin(),
is_inrange()) ;
if(config.debug){
for ( int i = 0 ; i < inrange_mask.size() ; i++){
std::cout << inrange_mask[i] << " " ;
if (i % 20 == 0 && i > 0)
std::cout << std::endl ;
}
std::cout << std::endl ;
}
// compute the separated offset arrays and copy out-of-range 3d particles
int map_idx = 0 ;
int feature_idx = 0 ;
int start_particles = 0 ;
int stop_particles = config.particlesPerFeature ;
int offset_total = 0 ;
DEBUG_MSG("compute offsets") ;
for ( int i = 0 ; i < inrange_mask.size() ; i++ ){
// check if we have crossed over to the next map
if( i >= offsets_all[map_idx+1] )
{
map_idx++ ;
offsets_in[map_idx] = offset_total ;
start_particles = 0 ;
stop_particles = config.particlesPerFeature ;
feature_idx = 0 ;
}
// DEBUG_VAL(start_particles) ;
// DEBUG_VAL(stop_particles) ;
// DEBUG_VAL(feature_idx) ;
// DEBUG_VAL(map_idx) ;
// DEBUG_VAL(inrange_mask[i]) ;
if (inrange_mask[i])
{
offset_total++ ;
}
else{
particles_out[map_idx].x.insert(particles_out[map_idx].x.end(),
&particles_all[map_idx].x[start_particles],
&particles_all[map_idx].x[stop_particles]) ;
particles_out[map_idx].y.insert(particles_out[map_idx].y.end(),
&particles_all[map_idx].y[start_particles],
&particles_all[map_idx].y[stop_particles]) ;
particles_out[map_idx].z.insert(particles_out[map_idx].z.end(),
&particles_all[map_idx].z[start_particles],
&particles_all[map_idx].z[stop_particles]) ;
particles_out[map_idx].weights.push_back(particles_all[map_idx].weights[feature_idx]);
}
start_particles += config.particlesPerFeature ;
stop_particles += config.particlesPerFeature ;
feature_idx++ ;
}
map_idx++ ;
offsets_in[map_idx] = offset_total ;
// shrink the output arrays to fit data
DEBUG_MSG("Shrink features_in") ;
DEBUG_VAL(offsets_in.back()) ;
features_in.resize(offsets_in.back());
DEBUG_MSG("shrink_to_fit") ;
features_in.shrink_to_fit();
}
/**
* @brief recombineFeatures Merge in-range and out-of-range features into a
* single feature vector
* @param features_in[in] vector of in-range features
* @param offsets_in[in] vector of indexing offsets for in-range features
* @param features_out[in] vector of out-of-range features
* @param offsets_out[in] vector of indexing offsets for out-of-range features
* @param features_all[out] vector where merged result will be written
* @param offsets_all[out] indexing offsets for merged features
*/
void recombineFeatures(device_vector<Gaussian3D> features_in,
host_vector<int> offsets_in,
device_vector<Gaussian3D> features_out,
host_vector<int> offsets_out,
                       device_vector<Gaussian3D>& features_all,
                       host_vector<int>& offsets_all){
// allocate space for outputs
features_all.resize(features_in.size()+features_out.size());
offsets_all.resize(offsets_in.size());
device_vector<Gaussian3D>::iterator it_result = features_all.begin() ;
// merge vectors map-by-map
offsets_all[0] = 0 ;
for ( int n = 0 ; n < offsets_in.size() ; n++ ){
int start_in = offsets_in[n] ;
int stop_in = offsets_in[n+1] ;
it_result = thrust::copy(&features_in[start_in],
&features_in[stop_in],
it_result) ;
int start_out = offsets_out[n] ;
int stop_out = offsets_out[n+1] ;
it_result = thrust::copy(&features_out[start_out],
&features_out[stop_out],
it_result) ;
offsets_all[n+1] = stop_in + stop_out ;
}
}
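/// Propagate every camera state with a constant-velocity motion model.
/// Zero-mean acceleration noise (config.ax..config.ayaw) is sampled per
/// state, the translational increment is rotated from the camera frame into
/// the world frame, the velocities are integrated, and the orientation angles
/// are wrapped to the principal interval.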
void
disparityPredict(DisparitySLAM& slam){
DEBUG_MSG("Performing prediction") ;
host_vector<CameraState> states = slam.states ;
int n_states = states.size() ;
vector<REAL> noise_x(n_states) ;
vector<REAL> noise_y(n_states) ;
vector<REAL> noise_z(n_states) ;
vector<REAL> noise_roll(n_states) ;
vector<REAL> noise_pitch(n_states) ;
vector<REAL> noise_yaw(n_states) ;
for (int i = 0 ; i < n_states ; i++){
noise_x[i] = randn()*config.ax ;
noise_y[i] = randn()*config.ay ;
noise_z[i] = randn()*config.az ;
noise_roll[i] = randn()*config.aroll ;
noise_pitch[i] = randn()*config.apitch ;
noise_yaw[i] = randn()*config.ayaw ;
}
REAL dt = config.dt ;
for (int i = 0 ; i < n_states ; i++ ){
ConstantVelocityState3D pose = slam.states[i].pose ;
REAL dx = dt*pose.vx + 0.5*noise_x[i]*pow(dt,2) ;
REAL dy = dt*pose.vy + 0.5*noise_y[i]*pow(dt,2) ;
REAL dz = dt*pose.vz + 0.5*noise_z[i]*pow(dt,2) ;
REAL dx_world = 0 ;
REAL dy_world = 0 ;
REAL dz_world = 0 ;
transformCameraToWorld(dx,dy,dz,slam.states[i],
dx_world,dy_world,dz_world,false);
pose.px += dx_world ;
pose.py += dy_world ;
pose.pz += dz_world ;
pose.proll += dt*pose.vroll + 0.5*noise_roll[i]*pow(dt,2) ;
pose.ppitch += dt*pose.vpitch + 0.5*noise_pitch[i]*pow(dt,2) ;
pose.pyaw += dt*pose.vyaw + 0.5*noise_yaw[i]*pow(dt,2) ;
pose.vx += dt*noise_x[i] ;
pose.vy += dt*noise_y[i] ;
pose.vz += dt*noise_z[i] ;
pose.vroll += dt*noise_roll[i] ;
pose.vpitch += dt*noise_pitch[i] ;
pose.vyaw += dt*noise_yaw[i] ;
pose.proll = wrapAngle(pose.proll) ;
pose.ppitch = wrapAngle(pose.ppitch) ;
pose.pyaw = wrapAngle(pose.pyaw) ;
slam.states[i].pose = pose ;
}
}
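/// SC-PHD measurement update for the disparity-space map representation.
/// Steps, as implemented below: concatenate all per-particle feature
/// particles, transform them into disparity space, fit one Gaussian per
/// feature, separate the in-range features, build birth Gaussians from the
/// measurements, run the pre-update kernel, and then perform the SC-PHD
/// update of the maps and particle weights.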
void
disparityUpdate(DisparitySLAM& slam,
std::vector<ImageMeasurement> Z){
host_vector<ImageMeasurement> measurements = Z ;
// DEBUG_MSG("Received measurements: ") ;
// for ( int i = 0 ; i < Z.size() ; i++ ){
// cout << Z[i].u << "," << Z[i].v << endl ;
// }
    // vector which contains the index of the camera state to which each particle belongs
host_vector<int> camera_idx_vector ;
// vectors to contain concatenated particles
host_vector<REAL> x_vector ;
host_vector<REAL> y_vector ;
host_vector<REAL> z_vector ;
// vector to contain camera states
host_vector<CameraState> camera_vector = slam.states ;
if(config.debug){
DEBUG_MSG("Camera states: ") ;
for ( int n = 0 ; n < camera_vector.size() ; n++ ){
CameraState cam = camera_vector[n] ;
cout << n << " " << cam.pose.px << ","
<< cam.pose.py << ","
<< cam.pose.pz << ","
<< cam.pose.proll << ","
<< cam.pose.ppitch << ","
<< cam.pose.pyaw << endl ;
}
}
// vector of map sizes
host_vector<int> map_offsets(slam.n_particles+1) ;
map_offsets[0] = 0 ;
    // vector of feature weights
host_vector<REAL> feature_weights ;
for ( int n = 0 ; n < slam.n_particles ; n++ ) {
ParticleMap map_n = slam.maps[n] ;
int n_particles = map_n.x.size() ;
map_offsets[n+1] = map_offsets[n] + n_particles/config.particlesPerFeature ;
camera_idx_vector.insert(camera_idx_vector.end(),
n_particles,
n) ;
x_vector.insert(x_vector.end(),map_n.x.begin(),map_n.x.end()) ;
y_vector.insert(y_vector.end(),map_n.y.begin(),map_n.y.end()) ;
z_vector.insert(z_vector.end(),map_n.z.begin(),map_n.z.end()) ;
feature_weights.insert(feature_weights.end(),
map_n.weights.begin(),
map_n.weights.end()) ;
}
// create device vectors
int n_particles_total = x_vector.size() ;
int n_features_total = n_particles_total/config.particlesPerFeature ;
device_vector<CameraState> dev_camera_vector = camera_vector ;
device_vector<int> dev_camera_idx_vector = camera_idx_vector ;
device_vector<REAL> dev_x_vector = x_vector ;
device_vector<REAL> dev_y_vector = y_vector ;
device_vector<REAL> dev_z_vector = z_vector ;
device_vector<REAL> dev_u_vector(n_particles_total) ;
device_vector<REAL> dev_v_vector(n_particles_total) ;
device_vector<REAL> dev_d_vector(n_particles_total) ;
device_vector<REAL> dev_inrange_vector(n_particles_total) ;
// do the transformation
DEBUG_MSG("Performing world to disparity transformation") ;
thrust::for_each(make_zip_iterator(make_tuple(
dev_camera_idx_vector.begin(),
dev_x_vector.begin(),
dev_y_vector.begin(),
dev_z_vector.begin(),
dev_u_vector.begin(),
dev_v_vector.begin(),
dev_d_vector.begin(),
dev_inrange_vector.begin()
)),
make_zip_iterator(make_tuple(
dev_camera_idx_vector.end(),
dev_x_vector.end(),
dev_y_vector.end(),
dev_z_vector.end(),
dev_u_vector.end(),
dev_v_vector.end(),
dev_d_vector.end(),
dev_inrange_vector.end()
)),
world_to_disparity_transform(raw_pointer_cast(&dev_camera_vector[0]))) ;
// DEBUG_MSG("Disparity-transformed particles: ") ;
// DEBUG_MSG("First map: ") ;
// for( int j = 0 ; j < slam.maps[0].x.size() ; j++ ){
// cout << dev_u_vector[j] << ","
// << dev_v_vector[j] << ","
// << dev_d_vector[j] << endl ;
// }
// DEBUG_MSG("Second map: ") ;
// for( int j = 0 ; j < slam.maps[1].x.size() ; j++ ){
// cout << dev_u_vector[j+slam.maps[0].x.size()] << ","
// << dev_v_vector[j+slam.maps[0].x.size()] << ","
// << dev_d_vector[j+slam.maps[0].x.size()] << endl ;
// }
// // generate the keys for grouping particles into features
// host_vector<int> feature_keys ;
//
// DEBUG_VAL(n_features_total) ;
// for ( int n = 0 ; n < n_features_total ; n++ ){
// feature_keys.insert(feature_keys.end(),config.particlesPerFeature,n) ;
// }
// // compute pd for each gaussian feature
// DEBUG_MSG("Computing Pd") ;
// device_vector<int> dev_feature_keys = feature_keys ;
// device_vector<int> dev_keys_out(n_particles_total) ;
// device_vector<REAL> dev_pd(n_particles_total) ;
// // sum the in-range values of all particles per feature
// reduce_by_key(dev_feature_keys.begin(),dev_feature_keys.end(),
// dev_inrange_vector.begin(),
// dev_keys_out.begin(),dev_pd.begin()) ;
// // divide the sum by the number of particles per feature
// divide_by<REAL> division_op((REAL)config.particlesPerFeature) ;
// thrust::transform(dev_pd.begin(),dev_pd.end(),dev_pd.begin(),division_op) ;
// // multiply by nominal pd value
// multiply_by<REAL> multiply_op(config.pd) ;
// thrust::transform(dev_pd.begin(),dev_pd.end(),dev_pd.begin(),multiply_op) ;
// if(config.debug){
// DEBUG_MSG("Computed Pd for first particle:") ;
// for ( int j = 0 ; j < slam.maps[0].weights.size() ; j++ ){
// cout << dev_pd[j] << endl ;
// }
// }
// if (n_particles_total > 0 && config.debug){
// DEBUG_MSG("Verify disparity space particles: ");
// for (int j = 0 ; j < config.particlesPerFeature ; j++){
// cout << dev_u_vector[j] << "," << dev_v_vector[j] << "," << dev_d_vector[j] << endl ;
// }
// }
// DEBUG_MSG("Separate in-range and outside-range features") ;
// int k = 0 ;
// host_vector<REAL> pd_vector = dev_pd ;
// device_vector<REAL> dev_u_inrange ;
// host_vector<REAL> u_outrange ;
// device_vector<REAL> dev_v_inrange ;
// host_vector<REAL> v_outrange ;
// device_vector<REAL> dev_d_inrange ;
// host_vector<REAL> d_outrange ;
// host_vector<int> map_offsets_inrange(slam.n_particles+1,0) ;
// host_vector<int> map_offsets_outrange(slam.n_particles+1,0) ;
// int n_features_inrange = 0 ;
// DEBUG_MSG("particles...") ;
// for (int i = 0 ; i < slam.n_particles ; i++ ){
// int n_features = map_offsets[i+1]-map_offsets[i] ;
// map_offsets_inrange[i+1] = map_offsets_inrange[i] ;
// map_offsets_outrange[i+1] = map_offsets_outrange[i] ;
// for ( int j = 0 ; j < n_features ; j++ ){
// int offset_begin = k*config.particlesPerFeature ;
// int offset_end = offset_begin + config.particlesPerFeature ;
// if(pd_vector[k] > 0){
// dev_u_inrange.insert(dev_u_inrange.end(),
// dev_u_vector.begin()+offset_begin,
// dev_u_vector.begin()+offset_end) ;
// dev_v_inrange.insert(dev_v_inrange.end(),
// dev_v_vector.begin()+offset_begin,
// dev_v_vector.begin()+offset_end) ;
// dev_d_inrange.insert(dev_d_inrange.end(),
// dev_d_vector.begin()+offset_begin,
// dev_d_vector.begin()+offset_end) ;
// map_offsets_inrange[i+1]++ ;
// n_features_inrange++ ;
// }
// else{
// u_outrange.insert(u_outrange.end(),
// dev_u_vector.begin()+offset_begin,
// dev_u_vector.begin()+offset_end) ;
// v_outrange.insert(v_outrange.end(),
// dev_v_vector.begin()+offset_begin,
// dev_v_vector.begin()+offset_end) ;
// d_outrange.insert(d_outrange.end(),
// dev_d_vector.begin()+offset_begin,
// dev_d_vector.begin()+offset_end) ;
// map_offsets_outrange[i+1]++ ;
// }
// k++ ;
// }
// }
// DEBUG_MSG("weights...") ;
// host_vector<REAL> feature_weights_inrange(n_features_total) ;
// host_vector<REAL> feature_weights_outrange(n_features_total) ;
// host_vector<REAL> pd_inrange(n_features_total) ;
// DEBUG_MSG("copy inrange weights...") ;
// thrust::copy_if(feature_weights.begin(),
// feature_weights.end(),
// pd_vector.begin(),
// feature_weights_inrange.begin(),
// gt0()) ;
// DEBUG_MSG("copy outrange weights...") ;
// thrust::copy_if(feature_weights.begin(),
// feature_weights.end(),
// pd_vector.begin(),
// feature_weights_outrange.begin(),
// leq0()) ;
// DEBUG_MSG("copy pd in range") ;
// thrust::copy_if(pd_vector.begin(),
// pd_vector.end(),
// pd_inrange.begin(),
// gt0()) ;
// dev_pd = pd_inrange ;
// fit gaussians to particles
cudaPrintfInit() ;
DEBUG_MSG("Fitting gaussians to disparity space particles") ;
int n_blocks = min(65535,n_features_total) ;
device_vector<REAL> dev_feature_weights = feature_weights ;
device_vector<Gaussian3D> dev_gaussians(n_features_total) ;
fitGaussiansKernel<<<n_blocks,256>>>
(raw_pointer_cast(&dev_u_vector[0]),
raw_pointer_cast(&dev_v_vector[0]),
raw_pointer_cast(&dev_d_vector[0]),
raw_pointer_cast(&dev_feature_weights[0]),
n_features_total,
raw_pointer_cast(&dev_gaussians[0]) ) ;
// cudaPrintfDisplay() ;
if(config.debug){
DEBUG_MSG("Fitted gaussians:") ;
for ( int n = 0 ; n < n_features_total ; n++ ){
Gaussian3D g = dev_gaussians[n] ;
print_feature(g) ;
}
}
// separate in range and out of range gaussians
DEBUG_MSG("Separating in-range features") ;
// for(int n = 0 ; n < map_offsets.size() ; n++)
// DEBUG_VAL(map_offsets[n]) ;
host_vector<int> map_offsets_in(slam.n_particles+1,0) ;
device_vector<Gaussian3D> dev_gaussians_in ;
vector<ParticleMap> particles_out = slam.maps ;
separateDisparityFeatures(dev_gaussians,map_offsets,slam.maps,
dev_gaussians_in,map_offsets_in,
particles_out);
int n_features_in = map_offsets_in.back() ;
// for(int n = 0 ; n < map_offsets_in.size() ; n++)
// DEBUG_VAL(map_offsets_in[n]) ;
device_vector<REAL> dev_pd(dev_gaussians_in.size(),config.pd) ;
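// every in-range feature is assigned the same nominal detection probability config.pd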
// if(config.debug){
// DEBUG_MSG("in-range gaussians:") ;
// for ( int n = 0 ; n < n_features_in ; n++ ){
// Gaussian3D g = dev_gaussians_in[n] ;
// print_feature(g) ;
// }
// }
// if (config.debug){
// DEBUG_MSG("out-of-range particles:") ;
// particles_out[0].print() ;
// }
// generate the birth terms
DEBUG_MSG("Generating birth terms from measurements") ;
int n_measurements = measurements.size() ;
host_vector<Gaussian3D> gaussians_birth(n_measurements*slam.n_particles) ;
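// the first n_measurements entries are built directly from the measurements
// (mean at the measured (u,v) with a nominal birth disparity, diagonal covariance);
// the remaining entries replicate them for every particle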
for ( int m = 0 ; m < n_measurements*slam.n_particles ; m++ ){
if ( m < n_measurements ){
gaussians_birth[m].weight = safeLog(config.birthWeight) ;
gaussians_birth[m].mean[0] = measurements[m].u ;
gaussians_birth[m].mean[1] = measurements[m].v ;
gaussians_birth[m].mean[2] = config.disparityBirth ;
gaussians_birth[m].cov[0] = pow(config.stdU,2) ;
gaussians_birth[m].cov[4] = pow(config.stdV,2) ;
gaussians_birth[m].cov[8] = pow(config.stdDBirth,2) ;
gaussians_birth[m].cov[1] = 0 ;
gaussians_birth[m].cov[2] = 0 ;
gaussians_birth[m].cov[3] = 0 ;
gaussians_birth[m].cov[5] = 0 ;
gaussians_birth[m].cov[6] = 0 ;
gaussians_birth[m].cov[7] = 0 ;
}
else
{
int idx = m % n_measurements ;
copy_gaussians(gaussians_birth[idx],gaussians_birth[m]) ;
}
// print_feature(gaussians_birth[m]) ;
}
DEBUG_MSG("copy births to device") ;
device_vector<Gaussian3D> dev_gaussians_birth = gaussians_birth ;
DEBUG_VAL(n_measurements) ;
if (config.debug){
for ( int i = 0 ; i < measurements.size() ; i++){
std::cout << measurements[i].u << "," << measurements[i].v << std::endl ;
}
}
device_vector<ImageMeasurement> dev_measurements(n_measurements) ;
DEBUG_MSG("copy measurements to device") ;
dev_measurements = measurements ;
// do the preupdate
DEBUG_MSG("allocate preupdate terms") ;
device_vector<Gaussian3D> dev_gaussians_preupdate(n_features_in*n_measurements) ;
if (dev_gaussians_preupdate.size() > 0){
DEBUG_MSG("Computing disparity pre-update") ;
n_blocks = min(65535,n_features_in) ;
preUpdateDisparityKernel<<<n_blocks,256>>>
(raw_pointer_cast(&dev_gaussians_in[0]),
raw_pointer_cast(&dev_pd[0]),
n_features_in,
raw_pointer_cast(&dev_measurements[0]),
n_measurements,
raw_pointer_cast(&dev_gaussians_preupdate[0]));
if (config.debug){
DEBUG_MSG("pre-update terms:") ;
for(int j = 0 ; j < n_features_in*n_measurements ; j++ ){
Gaussian3D g= dev_gaussians_preupdate[j] ;
print_feature(g) ;
}
}
}
// do the sc-phd update
DEBUG_VAL(config.birthWeight) ;
DEBUG_VAL(config.clutterDensity) ;
DEBUG_MSG("allocate particle weights") ;
device_vector<REAL> dev_weights(slam.n_particles) ;
DEBUG_MSG("copy map offsets to device") ;
device_vector<int> dev_map_offsets = map_offsets_in ;
int n_update = n_features_in*(n_measurements+1) +
slam.n_particles*n_measurements ;
DEBUG_VAL(n_update) ;
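// per particle: each in-range feature yields (n_measurements+1) update terms
// (one missed-detection term plus one per measurement), and each measurement
// additionally contributes one birth term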
DEBUG_MSG("allocate device memory for updated gaussians") ;
device_vector<Gaussian3D> dev_gaussians_update(n_update) ;
DEBUG_MSG("allocate device memory for merging flags") ;
device_vector<bool> dev_merge_flags(n_update) ;
n_blocks = min(slam.n_particles,65535) ;
// for ( int n = 0 ; n < slam.n_particles ; n++){
// int x = dev_map_offsets[n] ;
// DEBUG_VAL(x) ;
// }
DEBUG_MSG("Performing SC-PHD update") ;
phdUpdateKernel<<<n_blocks,256>>>
(raw_pointer_cast(&dev_gaussians_in[0]),
raw_pointer_cast(&dev_pd[0]),
raw_pointer_cast(&dev_gaussians_preupdate[0]),
raw_pointer_cast(&dev_gaussians_birth[0]),
raw_pointer_cast(&dev_map_offsets[0]),
slam.n_particles,n_measurements,
raw_pointer_cast(&dev_gaussians_update[0]),
raw_pointer_cast(&dev_merge_flags[0]),
raw_pointer_cast(&dev_weights[0])) ;
CUDA_SAFE_THREAD_SYNC() ;
// cudaPrintfDisplay() ;
cudaPrintfEnd();
// manually free some device memory
dev_gaussians_birth.resize(0);
dev_gaussians_birth.shrink_to_fit();
dev_gaussians.resize(0);
dev_gaussians.shrink_to_fit();
dev_gaussians_preupdate.resize(0);
dev_gaussians_preupdate.shrink_to_fit();
if(config.debug){
DEBUG_MSG("Updated gaussians and merge flags: ") ;
for (int n = 0 ; n < n_update ; n++){
bool flag = dev_merge_flags[n] ;
cout << flag << " " ;
Gaussian3D g = dev_gaussians_update[n] ;
print_feature(g) ;
}
}
// do the GM-merging
device_vector<int> dev_merged_sizes(slam.n_particles) ;
device_vector<Gaussian3D> dev_gaussians_merged_tmp(n_update) ;
// recalculate offsets for updated map size
for ( int n = 0 ; n < (slam.n_particles+1) ; n++ ){
map_offsets_in[n] *= (n_measurements+1) ;
map_offsets_in[n] += n_measurements*n ;
// DEBUG_VAL(map_offsets[n]) ;
}
dev_map_offsets = map_offsets_in ;
DEBUG_MSG("Performing GM reduction") ;
phdUpdateMergeKernel<<<n_blocks,256>>>
(raw_pointer_cast(&dev_gaussians_update[0]),
raw_pointer_cast(&dev_gaussians_merged_tmp[0]),
raw_pointer_cast(&dev_merged_sizes[0]),
raw_pointer_cast(&dev_merge_flags[0]),
raw_pointer_cast(&dev_map_offsets[0]),
slam.n_particles) ;
CUDA_SAFE_THREAD_SYNC() ;
// copy out the results of the GM reduction, leaving only valid gaussians
host_vector<int> merged_sizes = dev_merged_sizes ;
int n_merged_total = thrust::reduce(merged_sizes.begin(),
merged_sizes.end()) ;
device_vector<Gaussian3D> dev_gaussians_merged(n_merged_total) ;
device_vector<Gaussian3D>::iterator it = dev_gaussians_merged.begin() ;
for ( int n = 0 ; n < merged_sizes.size() ; n++){
it = thrust::copy_n(&dev_gaussians_merged_tmp[map_offsets_in[n]],
merged_sizes[n],
it) ;
}
// get the updated feature weights
device_vector<REAL> dev_merged_weights(n_merged_total) ;
get_weight<Gaussian3D> op ;
thrust::transform(dev_gaussians_merged.begin(),
dev_gaussians_merged.end(),
dev_merged_weights.begin(),
op) ;
host_vector<REAL> merged_weights = dev_merged_weights ;
if (config.debug)
{
DEBUG_MSG("merged feature weights: ") ;
for( int n = 0 ; n < merged_weights.size() ; n++){
cout << merged_weights[n] << endl ;
}
}
// initialize seeds for device-side random number generators
host_vector<RngState> seeds(config.particlesPerFeature) ;
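// the +129 offset keeps the first three components above the minimum admissible
// seed values (assuming RngState holds the usual hybrid Tausworthe/LCG state);
// the fourth component is unrestricted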
for ( int n = 0 ; n < config.particlesPerFeature ; n++ ){
seeds[n].z1 = static_cast<unsigned>(randu01()*127 + 129) ;
seeds[n].z2 = static_cast<unsigned>(randu01()*127 + 129) ;
seeds[n].z3 = static_cast<unsigned>(randu01()*127 + 129) ;
seeds[n].z4 = static_cast<unsigned>(randu01()*256) ;
}
device_vector<RngState> dev_seeds = seeds ;
// DEBUG_MSG("seeds: ") ;
// for (int n = 0 ; n < seeds.size() ; n++){
// cout << "[" << seeds[n].z1 << "," << seeds[n].z2 << ","
// << seeds[n].z3 << "," << seeds[n].z4 << "]" << endl ;
// }
// generate samples from merged gaussians
DEBUG_MSG("Sampling merged gaussians") ;
int n_particles_merged = n_merged_total*config.particlesPerFeature ;
device_vector<REAL> dev_samples(3*n_particles_merged) ;
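// dev_samples is packed as three contiguous blocks of length n_particles_merged: [u | v | d]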
n_blocks = ceil(config.particlesPerFeature/256.0) ;
DEBUG_VAL(n_blocks) ;
sampleGaussiansKernel<<<n_blocks,256>>>(
raw_pointer_cast(&dev_gaussians_merged[0]),
n_merged_total,
raw_pointer_cast(&dev_seeds[0]),
raw_pointer_cast(&dev_samples[0]));
if(config.debug){
DEBUG_MSG("Verify Gaussian sampling:") ;
Gaussian3D g = dev_gaussians_merged[0] ;
print_feature(g) ;
for(int j = 0 ; j < config.particlesPerFeature ; j++){
cout << dev_samples[j] << ","
<< dev_samples[j+n_particles_merged] << ","
<< dev_samples[j+2*n_particles_merged] << endl ;
}
}
// split samples into individual components
dev_u_vector.resize(n_particles_merged);
dev_v_vector.resize(n_particles_merged);
dev_d_vector.resize(n_particles_merged);
thrust::copy_n(dev_samples.begin(),
n_particles_merged,dev_u_vector.begin()) ;
thrust::copy_n(dev_samples.begin()+n_particles_merged,
n_particles_merged,dev_v_vector.begin()) ;
thrust::copy_n(dev_samples.begin()+2*n_particles_merged,
n_particles_merged,dev_d_vector.begin()) ;
// prepare the camera index vector for transforming the particles
// and save gaussian weights
camera_idx_vector.clear();
int offset = 0 ;
for ( int n = 0 ; n < slam.n_particles ; n++ ){
int n_merged = merged_sizes[n] ;
camera_idx_vector.insert(camera_idx_vector.end(),
n_merged*config.particlesPerFeature, n) ;
slam.maps[n].weights.assign(&merged_weights[offset],
&merged_weights[offset+n_merged]);
offset += n_merged ;
}
dev_camera_idx_vector = camera_idx_vector ;
// // copy merged features to host, and sample disparity particles
// DEBUG_MSG("Sampling disparity space particles") ;
// host_vector<REAL> u_vector ;
// host_vector<REAL> v_vector ;
// host_vector<REAL> d_vector ;
// host_vector<Gaussian3D> gaussians_merged = dev_gaussians_merged ;
// camera_idx_vector.clear();
// for ( int n = 0 ; n < slam.n_particles ; n++ ){
// int offset = map_offsets_in[n] ;
// int n_merged = merged_sizes[n] ;
// if(config.debug)
// DEBUG_VAL(n_merged) ;
// host_vector<REAL> weights(0) ;
// camera_idx_vector.insert(camera_idx_vector.end(),
// n_merged*config.particlesPerFeature, n) ;
// for ( int i = 0 ; i < n_merged ; i++ ){
// Gaussian3D g = gaussians_merged[offset+i] ;
//// if(config.debug)
//// print_feature(g) ;
// vector<REAL> samples(config.particlesPerFeature*3) ;
// randmvn3(g.mean,g.cov,config.particlesPerFeature,&samples[0]);
// REAL* u_ptr = &samples[0] ;
// REAL* v_ptr = u_ptr+config.particlesPerFeature ;
// REAL* d_ptr = v_ptr+config.particlesPerFeature ;
// u_vector.insert(u_vector.end(),
// u_ptr, u_ptr+config.particlesPerFeature) ;
// v_vector.insert(v_vector.end(),
// v_ptr, v_ptr+config.particlesPerFeature) ;
// d_vector.insert(d_vector.end(),
// d_ptr, d_ptr+config.particlesPerFeature) ;
// // save the gaussian weight now
// weights.push_back(g.weight);
// }
// slam.maps[n].weights.assign(weights.begin(),weights.end()) ;
// }
// copy disparity particles to device
// n_particles_total = u_vector.size() ;
// dev_u_vector = u_vector ;
// dev_v_vector = v_vector ;
// dev_d_vector = d_vector ;
dev_x_vector.resize(n_particles_merged);
dev_y_vector.resize(n_particles_merged);
dev_z_vector.resize(n_particles_merged);
// for (int n = 0 ; n < u_vector.size() ; n++ )
// DEBUG_VAL(u_vector[n]) ;
// do the transformation
DEBUG_MSG("Computing disparity to world transformation") ;
thrust::for_each(make_zip_iterator(make_tuple(
dev_camera_idx_vector.begin(),
dev_u_vector.begin(),
dev_v_vector.begin(),
dev_d_vector.begin(),
dev_x_vector.begin(),
dev_y_vector.begin(),
dev_z_vector.begin()
)),
make_zip_iterator(make_tuple(
dev_camera_idx_vector.end(),
dev_u_vector.end(),
dev_v_vector.end(),
dev_d_vector.end(),
dev_x_vector.end(),
dev_y_vector.end(),
dev_z_vector.end()
)),
disparity_to_world_transform(raw_pointer_cast(&dev_camera_vector[0]))) ;
// save euclidean particles
DEBUG_MSG("Saving updated 3D particles") ;
x_vector = dev_x_vector ;
y_vector = dev_y_vector ;
z_vector = dev_z_vector ;
host_vector<REAL> weights = dev_weights ;
if(config.debug){
DEBUG_MSG("Verify disparity to euclidean transformation") ;
for( int j = 0 ; j < config.particlesPerFeature ; j++ ){
cout << x_vector[j] << "," << y_vector[j] << "," << z_vector[j] << endl ;
}
}
offset = 0 ;
for ( int n = 0 ; n < slam.n_particles ; n++ ){
// DEBUG_VAL(slam.weights[n]) ;
int n_particles = merged_sizes[n]*config.particlesPerFeature ;
slam.maps[n].x.assign(x_vector.begin()+offset,
x_vector.begin()+offset+n_particles) ;
slam.maps[n].y.assign(y_vector.begin()+offset,
y_vector.begin()+offset+n_particles) ;
slam.maps[n].z.assign(z_vector.begin()+offset,
z_vector.begin()+offset+n_particles) ;
offset += n_particles ;
// recombine with out of range particles
slam.maps[n].weights.insert(slam.maps[n].weights.end(),
particles_out[n].weights.begin(),
particles_out[n].weights.end()) ;
slam.maps[n].x.insert(slam.maps[n].x.end(),
particles_out[n].x.begin(),
particles_out[n].x.end()) ;
slam.maps[n].y.insert(slam.maps[n].y.end(),
particles_out[n].y.begin(),
particles_out[n].y.end()) ;
slam.maps[n].z.insert(slam.maps[n].z.end(),
particles_out[n].z.begin(),
particles_out[n].z.end()) ;
// update parent particle weights
slam.weights[n] += weights[n] ;
if (config.debug)
DEBUG_VAL(slam.weights[n]) ;
}
// if (config.debug){
// DEBUG_MSG("Updated map particles: ") ;
// for ( int n = 0 ; n < slam.n_particles ; n++ ){
// DEBUG_VAL(n) ;
// slam.maps[n].print() ;
// }
// }
// normalize particle weights
DEBUG_MSG("normalize weights") ;
REAL log_weight_sum = logSumExp(slam.weights) ;
DEBUG_VAL(log_weight_sum) ;
for(int n = 0 ; n < slam.n_particles ; n++ ){
slam.weights[n] -= log_weight_sum ;
if(config.debug)
DEBUG_VAL(slam.weights[n]) ;
}
}
|
812790a75c98673e02c09afb3cba33960393b590.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Created by Zebulun Arendsee.
March 26, 2013
Modified by Will Landau.
June 30, 2013
will-landau.com
[email protected]
This program implements an MCMC algorithm for the following hierarchical
model:
y_k ~ Poisson(n_k * theta_k) k = 1, ..., K
theta_k ~ Gamma(a, b)
a ~ Unif(0, a0)
b ~ Unif(0, b0)
We let a0 and b0 be arbitrarily large.
Arguments:
1) input filename
With two space delimited columns holding integer values for
y and float values for n.
2) number of trials (1000 by default)
Output: A comma delimited file containing a column for a, b, and each
theta. All output is written to stdout.
Example dataset:
$ head -3 data.txt
4 0.91643
23 3.23709
7 0.40103
Example of compilation and execution:
$ nvcc gibbs_metropolis.cu -o gibbs
$ ./gibbs mydata.txt 2500 > output.csv
$
This code borrows from the nVidia developer zone documentation,
specifically http://docs.nvidia.com/cuda/curand/index.html#topic_1_2_1
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/reduce.h>
#define PI 3.14159265359f
#define THREADS_PER_BLOCK 64
#define CUDA_CALL(x) {if((x) != hipSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", hipGetErrorString(hipGetLastError())); \
exit(EXIT_FAILURE);}}
#define CURAND_CALL(x) {if((x) != HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", hipGetErrorString(hipGetLastError())); \
exit(EXIT_FAILURE);}}
__host__ void load_data(int argc, char **argv, int *K, int **y, float **n);
__host__ float sample_a(float a, float b, int K, float sum_logs);
__host__ float sample_b(float a, int K, float flat_sum);
__host__ float rnorm();
__host__ float rgamma(float a, float b);
__device__ float rgamma(hiprandState_t *state, int id, float a, float b);
__global__ void sample_theta(hiprandState_t *state, float *theta, float *log_theta,
int *y, float *n, float a, float b, int K);
__global__ void setup_kernel(hiprandState_t *state, unsigned int seed, int);
int main(int argc, char **argv){
hiprandState_t *devStates;
float a, b, flat_sum, sum_logs, *n, *dev_n, *dev_theta, *dev_log_theta;
int i, K, *y, *dev_y, nBlocks, trials = 1000;
if(argc > 2)
trials = atoi(argv[2]);
load_data(argc, argv, &K, &y, &n);
/*------ Allocate memory -----------------------------------------*/
CUDA_CALL(hipMalloc((void **)&dev_y, K * sizeof(int)));
CUDA_CALL(hipMemcpy(dev_y, y, K * sizeof(int),
hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc((void **)&dev_n, K * sizeof(float)));
CUDA_CALL(hipMemcpy(dev_n, n, K * sizeof(float),
hipMemcpyHostToDevice));
/* Allocate space for theta and log_theta on device and host */
CUDA_CALL(hipMalloc((void **)&dev_theta, K * sizeof(float)));
CUDA_CALL(hipMalloc((void **)&dev_log_theta, K * sizeof(float)));
/* Allocate space for random states on device */
CUDA_CALL(hipMalloc((void **)&devStates, K * sizeof(hiprandState_t)));
/*------ Setup random number generators (one per thread) ---------*/
nBlocks = (K + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( setup_kernel), dim3(nBlocks), dim3(THREADS_PER_BLOCK), 0, 0, devStates, 0, K);
/*------ MCMC ----------------------------------------------------*/
printf("alpha, beta\n");
/* starting values of hyperparameters */
a = 20;
b = 1;
/* Steps of MCMC */
for(i = 0; i < trials; i++){
hipLaunchKernelGGL(( sample_theta), dim3(nBlocks), dim3(THREADS_PER_BLOCK), 0, 0, devStates, dev_theta, dev_log_theta,
dev_y, dev_n, a, b, K);
/* Make iterators for thetas and log thetas. */
thrust::device_ptr<float> theta(dev_theta);
thrust::device_ptr<float> log_theta(dev_log_theta);
/* Compute pairwise sums of thetas and log_thetas. */
flat_sum = thrust::reduce(theta, theta + K);
sum_logs = thrust::reduce(log_theta, log_theta + K);
/* Sample hyperparameters. */
a = sample_a(a, b, K, sum_logs);
b = sample_b(a, K, flat_sum);
/* print hyperparameters. */
printf("%f, %f\n", a, b);
}
/*------ Free Memory -------------------------------------------*/
free(y);
free(n);
CUDA_CALL(hipFree(devStates));
CUDA_CALL(hipFree(dev_theta));
CUDA_CALL(hipFree(dev_log_theta));
CUDA_CALL(hipFree(dev_y));
CUDA_CALL(hipFree(dev_n));
return EXIT_SUCCESS;
}
/*
* Read in data.
*/
__host__ void load_data(int argc, char **argv, int *K, int **y, float **n){
int k;
char line[128];
FILE *fp;
if(argc > 1){
fp = fopen(argv[1], "r");
} else {
printf("Please provide input filename\n");
exit(EXIT_FAILURE);
}
if(fp == NULL){
printf("Cannot read file \n");
exit(EXIT_FAILURE);
}
*K = 0;
while( fgets (line, sizeof line, fp) != NULL )
(*K)++;
rewind(fp);
*y = (int*) malloc((*K) * sizeof(int));
*n = (float*) malloc((*K) * sizeof(float));
for(k = 0; k < *K; k++)
fscanf(fp, "%d %f", *y + k, *n + k);
fclose(fp);
}
/*
* Metropolis algorithm for producing random a values.
* The proposal distribution is normal with a variance that
* is adjusted at each step.
*/
__host__ float sample_a(float a, float b, int K, float sum_logs){
static float sigma = 2;
float U, log_acceptance_ratio, proposal = rnorm() * sigma + a;
if(proposal <= 0)
return a;
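/* Log of the full conditional of a (up to an additive constant):
   (a - 1) * sum_logs + K * a * log(b) - K * lgamma(a).
   The ratio below is this expression evaluated at the proposal
   minus its value at the current a. */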
log_acceptance_ratio = (proposal - a) * sum_logs +
K * (proposal - a) * log(b) -
K * (lgamma(proposal) - lgamma(a));
U = rand() / float(RAND_MAX);
if(log(U) < log_acceptance_ratio){
sigma *= 1.1;
return proposal;
} else {
sigma /= 1.1;
return a;
}
}
/*
* Sample b from a gamma distribution.
*/
__host__ float sample_b(float a, int K, float flat_sum){
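/* Under the flat prior on b, the full conditional is conjugate:
   b | a, theta ~ Gamma(K * a + 1, sum_k theta_k). */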
float hyperA = K * a + 1;
float hyperB = flat_sum;
return rgamma(hyperA, hyperB);
}
/*
* Box-Muller Transformation: Generate one standard normal variable.
*
* This algorithm could be used more efficiently by producing two
* random normal variables per call. However, for the CPU, much faster
* algorithms are possible (e.g. the Ziggurat Algorithm).
*
* This is actually the algorithm chosen by NVIDIA to calculate
* normal random variables on the GPU.
*/
__host__ float rnorm(){
float U1 = rand() / float(RAND_MAX);
float U2 = rand() / float(RAND_MAX);
float V1 = sqrt(-2 * log(U1)) * cos(2 * PI * U2);
/* float V2 = sqrt(-2 * log(U2)) * cos(2 * PI * U1); */
return V1;
}
/*
* See device rgamma function. This is probably not the
* fastest way to generate random gamma variables on a CPU.
*/
__host__ float rgamma(float a, float b){
float d = a - 1.0 / 3;
float Y, U, v;
while(1){
Y = rnorm();
v = pow((1 + Y / sqrt(9 * d)), 3);
/* Necessary to avoid taking the log of a negative number later. */
if(v <= 0)
continue;
U = rand() / float(RAND_MAX);
/* Accept the sample under the following condition.
Otherwise repeat loop. */
if(log(U) < 0.5 * pow(Y,2) + d * (1 - v + log(v)))
return d * v / b;
}
}
/*
* Generate a single Gamma distributed random variable by the Marsaglia
* algorithm (George Marsaglia, Wai Wan Tsang; 2001).
*
* Zeb chose this algorithm because it has a very high acceptance rate (>96%),
* so this while loop will usually only need to run a few times. Many other
* algorithms, while perhaps faster on a CPU, have acceptance rates on the
* order of 50% (very bad in a massively parallel context).
*/
__device__ float rgamma(hiprandState_t *state, int id, float a, float b){
float d = a - 1.0 / 3;
float Y, U, v;
while(1){
Y = hiprand_normal(&state[id]);
v = pow((1 + Y / sqrt(9 * d)), 3);
/* Necessary to avoid taking the log of a negative number later. */
if(v <= 0)
continue;
U = hiprand_uniform(&state[id]);
/* Accept the sample under the following condition.
Otherwise repeat loop. */
if(log(U) < 0.5 * pow(Y,2) + d * (1 - v + log(v)))
return d * v / b;
}
}
/*
* Sample each theta from the appropriate gamma distribution
*/
__global__ void sample_theta(hiprandState_t *state,
float *theta, float *log_theta, int *y, float *n,
float a, float b, int K){
int id = threadIdx.x + blockIdx.x * blockDim.x;
float hyperA, hyperB;
if(id < K){
hyperA = a + y[id];
hyperB = b + n[id];
theta[id] = rgamma(state, id, hyperA, hyperB);
log_theta[id] = log(theta[id]);
}
}
/*
* Initialize GPU random number generators
*/
__global__ void setup_kernel(hiprandState_t *state, unsigned int seed, int K){
int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < K)
hiprand_init(seed, id, 0, &state[id]);
}
|
812790a75c98673e02c09afb3cba33960393b590.cu
|
/*
Created by Zebulun Arendsee.
March 26, 2013
Modified by Will Landau.
June 30, 2013
will-landau.com
[email protected]
This program implements an MCMC algorithm for the following hierarchical
model:
y_k ~ Poisson(n_k * theta_k) k = 1, ..., K
theta_k ~ Gamma(a, b)
a ~ Unif(0, a0)
b ~ Unif(0, b0)
We let a0 and b0 be arbitrarily large.
Arguments:
1) input filename
With two space delimited columns holding integer values for
y and float values for n.
2) number of trials (1000 by default)
Output: A comma delimited file containing a column for a, b, and each
theta. All output is written to stdout.
Example dataset:
$ head -3 data.txt
4 0.91643
23 3.23709
7 0.40103
Example of compilation and execution:
$ nvcc gibbs_metropolis.cu -o gibbs
$ ./gibbs mydata.txt 2500 > output.csv
$
This code borrows from the nVidia developer zone documentation,
specifically http://docs.nvidia.com/cuda/curand/index.html#topic_1_2_1
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <curand_kernel.h>
#include <thrust/reduce.h>
#define PI 3.14159265359f
#define THREADS_PER_BLOCK 64
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
exit(EXIT_FAILURE);}}
#define CURAND_CALL(x) {if((x) != CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
exit(EXIT_FAILURE);}}
__host__ void load_data(int argc, char **argv, int *K, int **y, float **n);
__host__ float sample_a(float a, float b, int K, float sum_logs);
__host__ float sample_b(float a, int K, float flat_sum);
__host__ float rnorm();
__host__ float rgamma(float a, float b);
__device__ float rgamma(curandState *state, int id, float a, float b);
__global__ void sample_theta(curandState *state, float *theta, float *log_theta,
int *y, float *n, float a, float b, int K);
__global__ void setup_kernel(curandState *state, unsigned int seed, int);
int main(int argc, char **argv){
curandState *devStates;
float a, b, flat_sum, sum_logs, *n, *dev_n, *dev_theta, *dev_log_theta;
int i, K, *y, *dev_y, nBlocks, trials = 1000;
if(argc > 2)
trials = atoi(argv[2]);
load_data(argc, argv, &K, &y, &n);
/*------ Allocate memory -----------------------------------------*/
CUDA_CALL(cudaMalloc((void **)&dev_y, K * sizeof(int)));
CUDA_CALL(cudaMemcpy(dev_y, y, K * sizeof(int),
cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc((void **)&dev_n, K * sizeof(float)));
CUDA_CALL(cudaMemcpy(dev_n, n, K * sizeof(float),
cudaMemcpyHostToDevice));
/* Allocate space for theta and log_theta on device and host */
CUDA_CALL(cudaMalloc((void **)&dev_theta, K * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&dev_log_theta, K * sizeof(float)));
/* Allocate space for random states on device */
CUDA_CALL(cudaMalloc((void **)&devStates, K * sizeof(curandState)));
/*------ Setup random number generators (one per thread) ---------*/
nBlocks = (K + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
setup_kernel<<<nBlocks, THREADS_PER_BLOCK>>>(devStates, 0, K);
/*------ MCMC ----------------------------------------------------*/
printf("alpha, beta\n");
/* starting values of hyperparameters */
a = 20;
b = 1;
/* Steps of MCMC */
for(i = 0; i < trials; i++){
sample_theta<<<nBlocks, THREADS_PER_BLOCK>>>(devStates, dev_theta, dev_log_theta,
dev_y, dev_n, a, b, K);
/* Make iterators for thetas and log thetas. */
thrust::device_ptr<float> theta(dev_theta);
thrust::device_ptr<float> log_theta(dev_log_theta);
/* Compute pairwise sums of thetas and log_thetas. */
flat_sum = thrust::reduce(theta, theta + K);
sum_logs = thrust::reduce(log_theta, log_theta + K);
/* Sample hyperparameters. */
a = sample_a(a, b, K, sum_logs);
b = sample_b(a, K, flat_sum);
/* print hyperparameters. */
printf("%f, %f\n", a, b);
}
/*------ Free Memory -------------------------------------------*/
free(y);
free(n);
CUDA_CALL(cudaFree(devStates));
CUDA_CALL(cudaFree(dev_theta));
CUDA_CALL(cudaFree(dev_log_theta));
CUDA_CALL(cudaFree(dev_y));
CUDA_CALL(cudaFree(dev_n));
return EXIT_SUCCESS;
}
/*
* Read in data.
*/
__host__ void load_data(int argc, char **argv, int *K, int **y, float **n){
int k;
char line[128];
FILE *fp;
if(argc > 1){
fp = fopen(argv[1], "r");
} else {
printf("Please provide input filename\n");
exit(EXIT_FAILURE);
}
if(fp == NULL){
printf("Cannot read file \n");
exit(EXIT_FAILURE);
}
*K = 0;
while( fgets (line, sizeof line, fp) != NULL )
(*K)++;
rewind(fp);
*y = (int*) malloc((*K) * sizeof(int));
*n = (float*) malloc((*K) * sizeof(float));
for(k = 0; k < *K; k++)
fscanf(fp, "%d %f", *y + k, *n + k);
fclose(fp);
}
/*
* Metropolis algorithm for producing random a values.
* The proposal distribution is normal with a variance that
* is adjusted at each step.
*/
__host__ float sample_a(float a, float b, int K, float sum_logs){
static float sigma = 2;
float U, log_acceptance_ratio, proposal = rnorm() * sigma + a;
if(proposal <= 0)
return a;
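/* Log of the full conditional of a (up to an additive constant):
   (a - 1) * sum_logs + K * a * log(b) - K * lgamma(a).
   The ratio below is this expression evaluated at the proposal
   minus its value at the current a. */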
log_acceptance_ratio = (proposal - a) * sum_logs +
K * (proposal - a) * log(b) -
K * (lgamma(proposal) - lgamma(a));
U = rand() / float(RAND_MAX);
if(log(U) < log_acceptance_ratio){
sigma *= 1.1;
return proposal;
} else {
sigma /= 1.1;
return a;
}
}
/*
* Sample b from a gamma distribution.
*/
__host__ float sample_b(float a, int K, float flat_sum){
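/* Under the flat prior on b, the full conditional is conjugate:
   b | a, theta ~ Gamma(K * a + 1, sum_k theta_k). */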
float hyperA = K * a + 1;
float hyperB = flat_sum;
return rgamma(hyperA, hyperB);
}
/*
* Box-Muller Transformation: Generate one standard normal variable.
*
* This algorithm could be used more efficiently by producing two
* random normal variables per call. However, for the CPU, much faster
* algorithms are possible (e.g. the Ziggurat Algorithm).
*
* This is actually the algorithm chosen by NVIDIA to calculate
* normal random variables on the GPU.
*/
__host__ float rnorm(){
float U1 = rand() / float(RAND_MAX);
float U2 = rand() / float(RAND_MAX);
float V1 = sqrt(-2 * log(U1)) * cos(2 * PI * U2);
/* float V2 = sqrt(-2 * log(U2)) * cos(2 * PI * U1); */
return V1;
}
/*
* See device rgamma function. This is probably not the
* fastest way to generate random gamma variables on a CPU.
*/
__host__ float rgamma(float a, float b){
float d = a - 1.0 / 3;
float Y, U, v;
while(1){
Y = rnorm();
v = pow((1 + Y / sqrt(9 * d)), 3);
/* Necessary to avoid taking the log of a negative number later. */
if(v <= 0)
continue;
U = rand() / float(RAND_MAX);
/* Accept the sample under the following condition.
Otherwise repeat loop. */
if(log(U) < 0.5 * pow(Y,2) + d * (1 - v + log(v)))
return d * v / b;
}
}
/*
* Generate a single Gamma distributed random variable by the Marsaglia
* algorithm (George Marsaglia, Wai Wan Tsang; 2001).
*
* Zeb chose this algorithm because it has a very high acceptance rate (>96%),
* so this while loop will usually only need to run a few times. Many other
* algorithms, while perhaps faster on a CPU, have acceptance rates on the
* order of 50% (very bad in a massively parallel context).
*/
__device__ float rgamma(curandState *state, int id, float a, float b){
float d = a - 1.0 / 3;
float Y, U, v;
while(1){
Y = curand_normal(&state[id]);
v = pow((1 + Y / sqrt(9 * d)), 3);
/* Necessary to avoid taking the log of a negative number later. */
if(v <= 0)
continue;
U = curand_uniform(&state[id]);
/* Accept the sample under the following condition.
Otherwise repeat loop. */
if(log(U) < 0.5 * pow(Y,2) + d * (1 - v + log(v)))
return d * v / b;
}
}
/*
* Sample each theta from the appropriate gamma distribution
*/
__global__ void sample_theta(curandState *state,
float *theta, float *log_theta, int *y, float *n,
float a, float b, int K){
int id = threadIdx.x + blockIdx.x * blockDim.x;
float hyperA, hyperB;
if(id < K){
hyperA = a + y[id];
hyperB = b + n[id];
theta[id] = rgamma(state, id, hyperA, hyperB);
log_theta[id] = log(theta[id]);
}
}
/*
* Initialize GPU random number generators
*/
__global__ void setup_kernel(curandState *state, unsigned int seed, int K){
int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < K)
curand_init(seed, id, 0, &state[id]);
}
|
0f0f8a5451d89223a9aa92cc23644aa74e7eaaa2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/operators/interpolate_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using framework::Tensor;
template <typename T>
__global__ void KeNearestNeighborInterpFw(
const T* in, const size_t in_img_h, const size_t in_img_w,
const size_t input_h, const size_t input_w, T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = static_cast<int>(ratio_h * out_img_idy + 0.5);
int out_img_idx = tid % out_img_w;
int in_img_idx = static_cast<int>(ratio_w * out_img_idx + 0.5);
out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
}
}
template <typename T>
__global__ void KeNearestNeighborInterpBw(
T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h,
const size_t input_w, const T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = static_cast<int>(ratio_h * out_img_idy + 0.5);
int out_img_idx = tid % out_img_w;
int in_img_idx = static_cast<int>(ratio_w * out_img_idx + 0.5);
T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
const T out_pos = out[out_id_h * output_w + out_id_w];
platform::CudaAtomicAdd(in_pos, out_pos);
}
}
template <typename T>
__global__ void KeBilinearInterpFw(
const T* in, const size_t in_img_h, const size_t in_img_w,
const size_t input_h, const size_t input_w, T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = ratio_h * out_img_idy;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T h1lambda = ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int out_img_idx = tid % out_img_w;
int in_img_idx = ratio_w * out_img_idx;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T w1lambda = ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
// bilinear interpolation
out[out_id_h * output_w + out_id_w] =
h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) +
h1lambda * (w2lambda * in_pos[h_id * in_img_w] +
w1lambda * in_pos[h_id * in_img_w + w_id]);
}
}
template <typename T>
__global__ void KeBilinearInterpBw(
T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h,
const size_t input_w, const T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const T ratio_h, const T ratio_w) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = ratio_h * out_img_idy;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T h1lambda = ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int out_img_idx = tid % out_img_w;
int in_img_idx = ratio_w * out_img_idx;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T w1lambda = ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
const T* out_pos = &out[out_id_h * output_w + out_id_w];
platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[w_id], h2lambda * w1lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[h_id * in_img_w],
h1lambda * w2lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[h_id * in_img_w + w_id],
h1lambda * w1lambda * out_pos[0]);
}
}
template <typename T>
class InterpolateOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* input_data = input->data<T>();
auto interp_method = ctx.Attr<std::string>("interp_method");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
Tensor sizes;
framework::TensorCopy(*out_size, platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_h = size_data[0];
out_w = size_data[1];
}
int n = input->dims()[0];
int c = input->dims()[1];
int in_h = input->dims()[2];
int in_w = input->dims()[3];
auto* output_data =
output->mutable_data<T>({n, c, out_h, out_w}, ctx.GetPlace());
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
float ratio_h =
(out_h > 1) ? static_cast<float>(in_h - 1) / (out_h - 1) : 0.f;
float ratio_w =
(out_w > 1) ? static_cast<float>(in_w - 1) / (out_w - 1) : 0.f;
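// (in - 1) / (out - 1) scaling aligns the corner pixels of input and output;
// a one-pixel output dimension gets ratio 0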
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(*input, ctx.GetPlace(), output);
return;
}
int pixelNum = n * out_chw;
int grid_dim = (pixelNum + 512 - 1) / 512;
grid_dim = grid_dim > 8 ? 8 : grid_dim;
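// the interpolation kernels use a grid-stride loop, so a small fixed grid
// (at most 8 blocks of 512 threads) covers all output elements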
if ("nearest" == interp_method) {
hipLaunchKernelGGL(( KeNearestNeighborInterpFw<
T>), dim3(grid_dim), dim3(512), 0, ctx.cuda_device_context().stream(),
input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n,
out_chw, c, ratio_h, ratio_w);
} else if ("bilinear" == interp_method) {
hipLaunchKernelGGL(( KeBilinearInterpFw<
T>), dim3(grid_dim), dim3(512), 0, ctx.cuda_device_context().stream(),
input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n,
out_chw, c, ratio_h, ratio_w);
}
}
};
template <typename T>
class InterpolateGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* output_grad_data = output_grad->data<T>();
auto* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
auto& device_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
math::SetConstant<platform::CUDADeviceContext, T> zero;
zero(device_ctx, input_grad, static_cast<T>(0.0));
auto interp_method = ctx.Attr<std::string>("interp_method");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
Tensor sizes;
framework::TensorCopy(*out_size, platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_h = size_data[0];
out_w = size_data[1];
}
int n = input_grad->dims()[0];
int c = input_grad->dims()[1];
int in_h = input_grad->dims()[2];
int in_w = input_grad->dims()[3];
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
float ratio_h =
(out_h > 1) ? static_cast<float>(in_h - 1) / (out_h - 1) : 0.f;
float ratio_w =
(out_w > 1) ? static_cast<float>(in_w - 1) / (out_w - 1) : 0.f;
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(*output_grad, ctx.GetPlace(), input_grad);
return;
}
int pixelNum = n * out_chw;
int grid_dim = (pixelNum + 512 - 1) / 512;
grid_dim = grid_dim > 8 ? 8 : grid_dim;
if ("nearest" == interp_method) {
hipLaunchKernelGGL(( KeNearestNeighborInterpBw<
T>), dim3(grid_dim), dim3(512), 0, ctx.cuda_device_context().stream(),
input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h,
out_w, n, out_chw, c, ratio_h, ratio_w);
} else if ("bilinear" == interp_method) {
hipLaunchKernelGGL(( KeBilinearInterpBw<
T>), dim3(grid_dim), dim3(512), 0, ctx.cuda_device_context().stream(),
input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h,
out_w, n, out_chw, c, ratio_h, ratio_w);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(interpolate, ops::InterpolateOpCUDAKernel<float>,
ops::InterpolateOpCUDAKernel<double>,
ops::InterpolateOpCUDAKernel<int>);
REGISTER_OP_CUDA_KERNEL(interpolate_grad,
ops::InterpolateGradOpCUDAKernel<float>,
ops::InterpolateGradOpCUDAKernel<double>);
|
0f0f8a5451d89223a9aa92cc23644aa74e7eaaa2.cu
|
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/operators/interpolate_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using framework::Tensor;
template <typename T>
__global__ void KeNearestNeighborInterpFw(
const T* in, const size_t in_img_h, const size_t in_img_w,
const size_t input_h, const size_t input_w, T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = static_cast<int>(ratio_h * out_img_idy + 0.5);
int out_img_idx = tid % out_img_w;
int in_img_idx = static_cast<int>(ratio_w * out_img_idx + 0.5);
out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
}
}
template <typename T>
__global__ void KeNearestNeighborInterpBw(
T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h,
const size_t input_w, const T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = static_cast<int>(ratio_h * out_img_idy + 0.5);
int out_img_idx = tid % out_img_w;
int in_img_idx = static_cast<int>(ratio_w * out_img_idx + 0.5);
T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
const T out_pos = out[out_id_h * output_w + out_id_w];
platform::CudaAtomicAdd(in_pos, out_pos);
}
}
template <typename T>
__global__ void KeBilinearInterpFw(
const T* in, const size_t in_img_h, const size_t in_img_w,
const size_t input_h, const size_t input_w, T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = ratio_h * out_img_idy;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T h1lambda = ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int out_img_idx = tid % out_img_w;
int in_img_idx = ratio_w * out_img_idx;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T w1lambda = ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
// bilinear interpolation
out[out_id_h * output_w + out_id_w] =
h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) +
h1lambda * (w2lambda * in_pos[h_id * in_img_w] +
w1lambda * in_pos[h_id * in_img_w + w_id]);
}
}
template <typename T>
__global__ void KeBilinearInterpBw(
T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h,
const size_t input_w, const T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const T ratio_h, const T ratio_w) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = ratio_h * out_img_idy;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T h1lambda = ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int out_img_idx = tid % out_img_w;
int in_img_idx = ratio_w * out_img_idx;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T w1lambda = ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
const T* out_pos = &out[out_id_h * output_w + out_id_w];
platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[w_id], h2lambda * w1lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[h_id * in_img_w],
h1lambda * w2lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[h_id * in_img_w + w_id],
h1lambda * w1lambda * out_pos[0]);
}
}
template <typename T>
class InterpolateOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* input_data = input->data<T>();
auto interp_method = ctx.Attr<std::string>("interp_method");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
Tensor sizes;
framework::TensorCopy(*out_size, platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_h = size_data[0];
out_w = size_data[1];
}
int n = input->dims()[0];
int c = input->dims()[1];
int in_h = input->dims()[2];
int in_w = input->dims()[3];
auto* output_data =
output->mutable_data<T>({n, c, out_h, out_w}, ctx.GetPlace());
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
float ratio_h =
(out_h > 1) ? static_cast<float>(in_h - 1) / (out_h - 1) : 0.f;
float ratio_w =
(out_w > 1) ? static_cast<float>(in_w - 1) / (out_w - 1) : 0.f;
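// (in - 1) / (out - 1) scaling aligns the corner pixels of input and output;
// a one-pixel output dimension gets ratio 0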
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(*input, ctx.GetPlace(), output);
return;
}
int pixelNum = n * out_chw;
int grid_dim = (pixelNum + 512 - 1) / 512;
grid_dim = grid_dim > 8 ? 8 : grid_dim;
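// the interpolation kernels use a grid-stride loop, so a small fixed grid
// (at most 8 blocks of 512 threads) covers all output elements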
if ("nearest" == interp_method) {
KeNearestNeighborInterpFw<
T><<<grid_dim, 512, 0, ctx.cuda_device_context().stream()>>>(
input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n,
out_chw, c, ratio_h, ratio_w);
} else if ("bilinear" == interp_method) {
KeBilinearInterpFw<
T><<<grid_dim, 512, 0, ctx.cuda_device_context().stream()>>>(
input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n,
out_chw, c, ratio_h, ratio_w);
}
}
};
template <typename T>
class InterpolateGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* output_grad_data = output_grad->data<T>();
auto* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
auto& device_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
math::SetConstant<platform::CUDADeviceContext, T> zero;
zero(device_ctx, input_grad, static_cast<T>(0.0));
auto interp_method = ctx.Attr<std::string>("interp_method");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
Tensor sizes;
framework::TensorCopy(*out_size, platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_h = size_data[0];
out_w = size_data[1];
}
int n = input_grad->dims()[0];
int c = input_grad->dims()[1];
int in_h = input_grad->dims()[2];
int in_w = input_grad->dims()[3];
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
float ratio_h =
(out_h > 1) ? static_cast<float>(in_h - 1) / (out_h - 1) : 0.f;
float ratio_w =
(out_w > 1) ? static_cast<float>(in_w - 1) / (out_w - 1) : 0.f;
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(*output_grad, ctx.GetPlace(), input_grad);
return;
}
int pixelNum = n * out_chw;
int grid_dim = (pixelNum + 512 - 1) / 512;
grid_dim = grid_dim > 8 ? 8 : grid_dim;
if ("nearest" == interp_method) {
KeNearestNeighborInterpBw<
T><<<grid_dim, 512, 0, ctx.cuda_device_context().stream()>>>(
input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h,
out_w, n, out_chw, c, ratio_h, ratio_w);
} else if ("bilinear" == interp_method) {
KeBilinearInterpBw<
T><<<grid_dim, 512, 0, ctx.cuda_device_context().stream()>>>(
input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h,
out_w, n, out_chw, c, ratio_h, ratio_w);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(interpolate, ops::InterpolateOpCUDAKernel<float>,
ops::InterpolateOpCUDAKernel<double>,
ops::InterpolateOpCUDAKernel<int>);
REGISTER_OP_CUDA_KERNEL(interpolate_grad,
ops::InterpolateGradOpCUDAKernel<float>,
ops::InterpolateGradOpCUDAKernel<double>);
|
0e4a57a4e198106fbc630e411380e7990c127127.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void AdaDeltaUpdate(int N, Dtype* g, Dtype* h, Dtype* h2,
Dtype momentum, Dtype delta, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
float gi = g[i];
float hi = h[i] = momentum * h[i] + (1-momentum) * gi * gi;
gi = gi * sqrt((h2[i] + delta) / (hi + delta));
h2[i] = momentum * h2[i] + (1-momentum) * gi * gi;
g[i] = local_rate * gi;
}
}
template <typename Dtype>
void adadelta_update_gpu(int N, Dtype* g, Dtype* h, Dtype* h2, Dtype momentum,
Dtype delta, Dtype local_rate) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(AdaDeltaUpdate<Dtype>, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, g, h, h2, momentum, delta, local_rate);
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
}
template void adadelta_update_gpu<float>(int , float*, float*, float*,
float, float, float);
template void adadelta_update_gpu<double>(int, double*, double*, double*,
double, double, double);
} // namespace caffe
|
0e4a57a4e198106fbc630e411380e7990c127127.cu
|
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void AdaDeltaUpdate(int N, Dtype* g, Dtype* h, Dtype* h2,
Dtype momentum, Dtype delta, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
float gi = g[i];
float hi = h[i] = momentum * h[i] + (1-momentum) * gi * gi;
gi = gi * sqrt((h2[i] + delta) / (hi + delta));
h2[i] = momentum * h2[i] + (1-momentum) * gi * gi;
g[i] = local_rate * gi;
}
}
template <typename Dtype>
void adadelta_update_gpu(int N, Dtype* g, Dtype* h, Dtype* h2, Dtype momentum,
Dtype delta, Dtype local_rate) {
AdaDeltaUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, g, h, h2, momentum, delta, local_rate);
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
}
template void adadelta_update_gpu<float>(int , float*, float*, float*,
float, float, float);
template void adadelta_update_gpu<double>(int, double*, double*, double*,
double, double, double);
} // namespace caffe
|
b03b6cacf40da4bc22ada5faafc433fafd7b1a81.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
#include <ATen/hip/HIPContext.h>
#include <torch/script.h>
#include <vector>
#include "open3d/ml/impl/sparse_conv/SparseConvTransposeBackpropFilter.cuh"
#include "open3d/ml/pytorch/TorchHelper.h"
using namespace open3d::ml::impl;
template <class TFeat, class TOut, class TIndex, class TKernelIndex>
void SparseConvTransposeBackpropFilterCUDA(
const torch::Tensor& filters,
const torch::Tensor& out_importance,
const torch::Tensor& inp_features,
const torch::Tensor& inp_neighbors_importance_sum,
const torch::Tensor& inp_neighbors_row_splits,
const torch::Tensor& neighbors_index,
const torch::Tensor& neighbors_kernel_index,
const torch::Tensor& neighbors_importance,
const torch::Tensor& neighbors_row_splits,
const torch::Tensor& out_features_gradient,
const bool normalize,
const int64_t max_temp_mem_MB,
torch::Tensor& filter_backprop) {
std::vector<int> filter_dims;
for (auto d : filters.sizes()) {
filter_dims.push_back(d);
}
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto cuda_device_props = at::cuda::getCurrentDeviceProperties();
const int texture_alignment = cuda_device_props->textureAlignment;
auto device = filters.device();
void* temp_ptr = nullptr;
size_t temp_size = 0;
size_t max_temp_size = 0;
// determine temp_size
SparseConvTransposeBackpropFilterCUDA<TFeat, TOut, TIndex, TKernelIndex>(
stream, temp_ptr, temp_size, max_temp_size, texture_alignment,
filter_backprop.data_ptr<TOut>(), filter_dims,
neighbors_row_splits.size(0) - 1,
out_importance.size(0) ? out_importance.data_ptr<TFeat>() : nullptr,
inp_features.size(0), inp_features.data_ptr<TFeat>(),
inp_neighbors_importance_sum.size(0)
? inp_neighbors_importance_sum.data_ptr<TFeat>()
: nullptr,
inp_neighbors_row_splits.data_ptr<int64_t>(),
neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(),
neighbors_kernel_index.data_ptr<TKernelIndex>(),
neighbors_importance.size(0)
? neighbors_importance.data_ptr<TFeat>()
: nullptr,
neighbors_row_splits.data_ptr<int64_t>(),
out_features_gradient.data_ptr<TFeat>(), normalize);
temp_size = ::max(
::min(size_t(max_temp_mem_MB) * 1024 * 1024, max_temp_size),
temp_size);
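// grant up to max_temp_mem_MB of workspace (but no more than the op can use),
// and never less than the minimum temp_size reported by the size query above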
auto temp_tensor = CreateTempTensor(temp_size, device, &temp_ptr);
// actually run the operation
SparseConvTransposeBackpropFilterCUDA<TFeat, TOut, TIndex, TKernelIndex>(
stream, temp_ptr, temp_size, max_temp_size, texture_alignment,
filter_backprop.data_ptr<TOut>(), filter_dims,
neighbors_row_splits.size(0) - 1,
out_importance.size(0) ? out_importance.data_ptr<TFeat>() : nullptr,
inp_features.size(0), inp_features.data_ptr<TFeat>(),
inp_neighbors_importance_sum.size(0)
? inp_neighbors_importance_sum.data_ptr<TFeat>()
: nullptr,
inp_neighbors_row_splits.data_ptr<int64_t>(),
neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(),
neighbors_kernel_index.data_ptr<TKernelIndex>(),
neighbors_importance.size(0)
? neighbors_importance.data_ptr<TFeat>()
: nullptr,
neighbors_row_splits.data_ptr<int64_t>(),
out_features_gradient.data_ptr<TFeat>(), normalize);
}
#define INSTANTIATE(TFeat, TOut, TIndex, TKernelIndex) \
template void \
SparseConvTransposeBackpropFilterCUDA<TFeat, TOut, TIndex, TKernelIndex>( \
const torch::Tensor& filters, const torch::Tensor& out_importance, \
const torch::Tensor& inp_features, \
const torch::Tensor& inp_neighbors_importance_sum, \
const torch::Tensor& inp_neighbors_row_splits, \
const torch::Tensor& neighbors_index, \
const torch::Tensor& neighbors_kernel_index, \
const torch::Tensor& neighbors_importance, \
const torch::Tensor& neighbors_row_splits, \
const torch::Tensor& out_features_gradient, const bool normalize, \
const int64_t max_temp_mem_MB, torch::Tensor& filter_backprop);
INSTANTIATE(float, float, int32_t, uint8_t)
|
b03b6cacf40da4bc22ada5faafc433fafd7b1a81.cu
|
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
#include <ATen/cuda/CUDAContext.h>
#include <torch/script.h>
#include <vector>
#include "open3d/ml/impl/sparse_conv/SparseConvTransposeBackpropFilter.cuh"
#include "open3d/ml/pytorch/TorchHelper.h"
using namespace open3d::ml::impl;
template <class TFeat, class TOut, class TIndex, class TKernelIndex>
void SparseConvTransposeBackpropFilterCUDA(
const torch::Tensor& filters,
const torch::Tensor& out_importance,
const torch::Tensor& inp_features,
const torch::Tensor& inp_neighbors_importance_sum,
const torch::Tensor& inp_neighbors_row_splits,
const torch::Tensor& neighbors_index,
const torch::Tensor& neighbors_kernel_index,
const torch::Tensor& neighbors_importance,
const torch::Tensor& neighbors_row_splits,
const torch::Tensor& out_features_gradient,
const bool normalize,
const int64_t max_temp_mem_MB,
torch::Tensor& filter_backprop) {
std::vector<int> filter_dims;
for (auto d : filters.sizes()) {
filter_dims.push_back(d);
}
auto stream = at::cuda::getCurrentCUDAStream();
auto cuda_device_props = at::cuda::getCurrentDeviceProperties();
const int texture_alignment = cuda_device_props->textureAlignment;
auto device = filters.device();
void* temp_ptr = nullptr;
size_t temp_size = 0;
size_t max_temp_size = 0;
// determine temp_size
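    // (this first call passes temp_ptr == nullptr, so the implementation only
    // reports the required and maximum temporary buffer sizes; no work is done yet)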
SparseConvTransposeBackpropFilterCUDA<TFeat, TOut, TIndex, TKernelIndex>(
stream, temp_ptr, temp_size, max_temp_size, texture_alignment,
filter_backprop.data_ptr<TOut>(), filter_dims,
neighbors_row_splits.size(0) - 1,
out_importance.size(0) ? out_importance.data_ptr<TFeat>() : nullptr,
inp_features.size(0), inp_features.data_ptr<TFeat>(),
inp_neighbors_importance_sum.size(0)
? inp_neighbors_importance_sum.data_ptr<TFeat>()
: nullptr,
inp_neighbors_row_splits.data_ptr<int64_t>(),
neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(),
neighbors_kernel_index.data_ptr<TKernelIndex>(),
neighbors_importance.size(0)
? neighbors_importance.data_ptr<TFeat>()
: nullptr,
neighbors_row_splits.data_ptr<int64_t>(),
out_features_gradient.data_ptr<TFeat>(), normalize);
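    // clamp the temp buffer to the user-provided budget (max_temp_mem_MB),
    // but never below the minimum size required by the kernel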
temp_size = std::max(
std::min(size_t(max_temp_mem_MB) * 1024 * 1024, max_temp_size),
temp_size);
auto temp_tensor = CreateTempTensor(temp_size, device, &temp_ptr);
// actually run the operation
SparseConvTransposeBackpropFilterCUDA<TFeat, TOut, TIndex, TKernelIndex>(
stream, temp_ptr, temp_size, max_temp_size, texture_alignment,
filter_backprop.data_ptr<TOut>(), filter_dims,
neighbors_row_splits.size(0) - 1,
out_importance.size(0) ? out_importance.data_ptr<TFeat>() : nullptr,
inp_features.size(0), inp_features.data_ptr<TFeat>(),
inp_neighbors_importance_sum.size(0)
? inp_neighbors_importance_sum.data_ptr<TFeat>()
: nullptr,
inp_neighbors_row_splits.data_ptr<int64_t>(),
neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(),
neighbors_kernel_index.data_ptr<TKernelIndex>(),
neighbors_importance.size(0)
? neighbors_importance.data_ptr<TFeat>()
: nullptr,
neighbors_row_splits.data_ptr<int64_t>(),
out_features_gradient.data_ptr<TFeat>(), normalize);
}
#define INSTANTIATE(TFeat, TOut, TIndex, TKernelIndex) \
template void \
SparseConvTransposeBackpropFilterCUDA<TFeat, TOut, TIndex, TKernelIndex>( \
const torch::Tensor& filters, const torch::Tensor& out_importance, \
const torch::Tensor& inp_features, \
const torch::Tensor& inp_neighbors_importance_sum, \
const torch::Tensor& inp_neighbors_row_splits, \
const torch::Tensor& neighbors_index, \
const torch::Tensor& neighbors_kernel_index, \
const torch::Tensor& neighbors_importance, \
const torch::Tensor& neighbors_row_splits, \
const torch::Tensor& out_features_gradient, const bool normalize, \
const int64_t max_temp_mem_MB, torch::Tensor& filter_backprop);
INSTANTIATE(float, float, int32_t, uint8_t)
|
4a90df4dbf534e732c0d482e7c9686e0e86ce854.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_salsa.cu
*
* @brief Simple test driver program for using SALSA algorithm to compute rank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// SALSA includes
#include <gunrock/app/salsa/salsa_enactor.cuh>
#include <gunrock/app/salsa/salsa_problem.cuh>
#include <gunrock/app/salsa/salsa_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::salsa;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
template <typename VertexId, typename Value>
struct RankPair {
VertexId vertex_id;
Value page_rank;
RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {}
};
template<typename RankPair>
bool SALSACompare(
RankPair elem1,
RankPair elem2)
{
return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf("\ntest_salsa <graph type> <graph type args> [--device=<device_index>] "
"[--undirected] [--instrumented] [--quick] "
"[--v]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --undirected If set then treat the graph as undirected.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code.\n"
);
}
/**
 * @brief Displays the SALSA result (top hub and authority rank scores)
*
* @param[in] hrank Pointer to hub rank score array
* @param[in] arank Pointer to authority rank score array
* @param[in] nodes Number of nodes in the graph.
*/
template<typename Value, typename SizeT>
void DisplaySolution(Value *hrank, Value *arank, SizeT nodes)
{
//sort the top page ranks
RankPair<SizeT, Value> *hr_list =
(RankPair<SizeT, Value>*)malloc(sizeof(RankPair<SizeT, Value>) * nodes);
RankPair<SizeT, Value> *ar_list =
(RankPair<SizeT, Value>*)malloc(sizeof(RankPair<SizeT, Value>) * nodes);
for (int i = 0; i < nodes; ++i)
{
hr_list[i].vertex_id = i;
hr_list[i].page_rank = hrank[i];
ar_list[i].vertex_id = i;
ar_list[i].page_rank = arank[i];
}
std::stable_sort(
hr_list, hr_list + nodes, SALSACompare<RankPair<SizeT, Value> >);
std::stable_sort(
ar_list, ar_list + nodes, SALSACompare<RankPair<SizeT, Value> >);
// Print out at most the top 10 hub/authority ranks
int top = (nodes < 10) ? nodes : 10;
printf("Top %d Page Ranks:\n", top);
for (int i = 0; i < top; ++i)
{
printf("Vertex ID: %d, Hub Rank: %5f\n", hr_list[i].vertex_id, hr_list[i].page_rank);
printf("Vertex ID: %d, Authority Rank: %5f\n", ar_list[i].vertex_id, ar_list[i].page_rank);
}
free(hr_list);
free(ar_list);
}
/**
* Performance/Evaluation statistics
*/
struct Stats
{
const char *name;
Statistic rate;
Statistic search_depth;
Statistic redundant_work;
Statistic duty;
Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
Stats(const char *name) :
name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
* @brief Displays timing and correctness statistics
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to the Stats object defined in RunTests
* @param[in] elapsed Total elapsed kernel running time
* @param[in] avg_duty Average duty of the BFS kernels
*/
void DisplayStats(
Stats &stats,
double elapsed,
double avg_duty)
{
// Display test name
printf("[%s] finished. ", stats.name);
// Display the specific sample statistics
printf(" elapsed: %.3f ms", elapsed);
if (avg_duty != 0)
{
printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
}
printf("\n");
}
/******************************************************************************
* SALSA Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference SALSA implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] inv_graph Reference to the inversed CSR graph we process on
* @param[in] hrank Host-side vector to store CPU computed hub ranks for each node
* @param[in] arank Host-side vector to store CPU computed authority ranks for each node
* @param[in] max_iter max iteration to go
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void SimpleReferenceSALSA(
const Csr<VertexId, Value, SizeT> &graph,
const Csr<VertexId, Value, SizeT> &inv_graph,
Value *hrank,
Value *arank,
SizeT max_iter)
{
//Preparation
//
//compute SALSA rank
//
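    // NOTE: the CPU reference SALSA computation is not implemented here;
    // RunTests sets g_quick = true below, so validation against this
    // reference is effectively skipped.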
CpuTimer cpu_timer;
cpu_timer.Start();
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
printf("CPU BFS finished in %lf msec.\n", elapsed);
}
/**
* @brief Run SALSA tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] inv_graph Reference to the inversed CSR graph we process on
* @param[in] max_iter Max iteration for Page Rank computing
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] max_queue_sizing Max queue sizing for frontier
* @param[in] context CudaContext for moderngpu to use
*
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
const Csr<VertexId, Value, SizeT> &inv_graph,
SizeT max_iter,
int max_grid_size,
int num_gpus,
double max_queue_sizing,
CudaContext& context)
{
typedef SALSAProblem<
VertexId,
SizeT,
Value> Problem;
// Allocate host-side label array (for both reference and gpu-computed results)
Value *reference_hrank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *reference_arank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *h_hrank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *h_arank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *reference_check_h = (g_quick) ? NULL : reference_hrank;
Value *reference_check_a = (g_quick) ? NULL : reference_arank;
// Allocate BFS enactor map
SALSAEnactor<INSTRUMENT> salsa_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
inv_graph,
num_gpus),
"Problem SALSA Initialization Failed", __FILE__, __LINE__);
//
// Compute reference CPU SALSA solution for source-distance
//
if (reference_check_h != NULL)
{
printf("compute ref value\n");
SimpleReferenceSALSA(
graph,
inv_graph,
reference_check_h,
reference_check_a,
max_iter);
printf("\n");
}
Stats *stats = new Stats("GPU SALSA");
long long total_queued = 0;
double avg_duty = 0.0;
// Perform BFS
GpuTimer gpu_timer;
util::GRError(
csr_problem->Reset(salsa_enactor.GetFrontierType(), 200.0),
"SALSA Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(
salsa_enactor.template Enact<Problem>(
context, csr_problem, max_iter, max_grid_size),
"SALSA Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
salsa_enactor.GetStatistics(total_queued, avg_duty);
double elapsed = gpu_timer.ElapsedMillis();
// Copy out results
util::GRError(
csr_problem->Extract(h_hrank, h_arank),
"SALSA Problem Data Extraction Failed", __FILE__, __LINE__);
// Verify the result
if (reference_check_a != NULL)
{
printf("Validity: ");
CompareResults(h_hrank, reference_check_h, graph.nodes, true);
CompareResults(h_arank, reference_check_a, graph.nodes, true);
}
printf("\nFirst 40 labels of the GPU result.");
// Display Solution
DisplaySolution(h_hrank, h_arank, graph.nodes);
DisplayStats(
*stats,
elapsed,
avg_duty);
// Cleanup
delete stats;
if (csr_problem) delete csr_problem;
if (reference_check_h) free(reference_check_h);
if (reference_check_a) free(reference_check_a);
if (h_hrank) free(h_hrank);
if (h_arank) free(h_arank);
hipDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] inv_graph Reference to the inversed CSR graph we process on
* @param[in] args Reference to the command line arguments
 * @param[in] context CudaContext for moderngpu library
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
Csr<VertexId, Value, SizeT> &inv_graph,
CommandLineArgs &args,
CudaContext& context)
{
SizeT max_iter = 20;
bool instrumented = false; // Whether or not to collect instrumentation from kernels
int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
double max_queue_sizing = 1.0;
instrumented = args.CheckCmdLineFlag("instrumented");
args.GetCmdLineArgument("max-iter", max_iter);
g_quick = true; // missing reference now
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("queue-sizing", max_queue_sizing);
if (instrumented)
{
RunTests<VertexId, Value, SizeT, true>(
graph,
inv_graph,
max_iter,
max_grid_size,
num_gpus,
max_queue_sizing,
context);
}
else
{
RunTests<VertexId, Value, SizeT, false>(
graph,
inv_graph,
max_iter,
max_grid_size,
num_gpus,
max_queue_sizing,
context);
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
//DeviceInit(args);
//hipSetDeviceFlags(hipDeviceMapHost);
int dev = 0;
args.GetCmdLineArgument("device", dev);
ContextPtr context = mgpu::CreateCudaDevice(dev);
//srand(0); // Presently deterministic
//srand(time(NULL));
// Parse graph-construction params
g_undirected = false;
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier
typedef float Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
Csr<VertexId, Value, SizeT> inv_csr(false);
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0)
{
return 1;
}
if (graphio::BuildMarketGraph<false>(
market_filename,
inv_csr,
g_undirected,
true) != 0)
{
return 1;
}
csr.PrintHistogram();
//csr.DisplayGraph();
//inv_csr.DisplayGraph();
printf("out node:%d, in node:%d\n", csr.out_nodes, inv_csr.out_nodes);
// Run tests
RunTests(csr, inv_csr, args, *context);
}
else
{
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
|
4a90df4dbf534e732c0d482e7c9686e0e86ce854.cu
|
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_salsa.cu
*
* @brief Simple test driver program for using SALSA algorithm to compute rank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// SALSA includes
#include <gunrock/app/salsa/salsa_enactor.cuh>
#include <gunrock/app/salsa/salsa_problem.cuh>
#include <gunrock/app/salsa/salsa_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::salsa;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
template <typename VertexId, typename Value>
struct RankPair {
VertexId vertex_id;
Value page_rank;
RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {}
};
template<typename RankPair>
bool SALSACompare(
RankPair elem1,
RankPair elem2)
{
return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf("\ntest_salsa <graph type> <graph type args> [--device=<device_index>] "
"[--undirected] [--instrumented] [--quick] "
"[--v]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --undirected If set then treat the graph as undirected.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code.\n"
);
}
/**
 * @brief Displays the SALSA result (top hub and authority rank scores)
*
* @param[in] hrank Pointer to hub rank score array
* @param[in] arank Pointer to authority rank score array
* @param[in] nodes Number of nodes in the graph.
*/
template<typename Value, typename SizeT>
void DisplaySolution(Value *hrank, Value *arank, SizeT nodes)
{
//sort the top page ranks
RankPair<SizeT, Value> *hr_list =
(RankPair<SizeT, Value>*)malloc(sizeof(RankPair<SizeT, Value>) * nodes);
RankPair<SizeT, Value> *ar_list =
(RankPair<SizeT, Value>*)malloc(sizeof(RankPair<SizeT, Value>) * nodes);
for (int i = 0; i < nodes; ++i)
{
hr_list[i].vertex_id = i;
hr_list[i].page_rank = hrank[i];
ar_list[i].vertex_id = i;
ar_list[i].page_rank = arank[i];
}
std::stable_sort(
hr_list, hr_list + nodes, SALSACompare<RankPair<SizeT, Value> >);
std::stable_sort(
ar_list, ar_list + nodes, SALSACompare<RankPair<SizeT, Value> >);
// Print out at most the top 10 hub/authority ranks
int top = (nodes < 10) ? nodes : 10;
printf("Top %d Page Ranks:\n", top);
for (int i = 0; i < top; ++i)
{
printf("Vertex ID: %d, Hub Rank: %5f\n", hr_list[i].vertex_id, hr_list[i].page_rank);
printf("Vertex ID: %d, Authority Rank: %5f\n", ar_list[i].vertex_id, ar_list[i].page_rank);
}
free(hr_list);
free(ar_list);
}
/**
* Performance/Evaluation statistics
*/
struct Stats
{
const char *name;
Statistic rate;
Statistic search_depth;
Statistic redundant_work;
Statistic duty;
Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
Stats(const char *name) :
name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
* @brief Displays timing and correctness statistics
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to the Stats object defined in RunTests
* @param[in] elapsed Total elapsed kernel running time
* @param[in] avg_duty Average duty of the BFS kernels
*/
void DisplayStats(
Stats &stats,
double elapsed,
double avg_duty)
{
// Display test name
printf("[%s] finished. ", stats.name);
// Display the specific sample statistics
printf(" elapsed: %.3f ms", elapsed);
if (avg_duty != 0)
{
printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
}
printf("\n");
}
/******************************************************************************
* SALSA Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference SALSA implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] inv_graph Reference to the inversed CSR graph we process on
* @param[in] hrank Host-side vector to store CPU computed hub ranks for each node
* @param[in] arank Host-side vector to store CPU computed authority ranks for each node
* @param[in] max_iter max iteration to go
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void SimpleReferenceSALSA(
const Csr<VertexId, Value, SizeT> &graph,
const Csr<VertexId, Value, SizeT> &inv_graph,
Value *hrank,
Value *arank,
SizeT max_iter)
{
//Preparation
//
//compute SALSA rank
//
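    // NOTE: the CPU reference SALSA computation is not implemented here;
    // RunTests sets g_quick = true below, so validation against this
    // reference is effectively skipped.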
CpuTimer cpu_timer;
cpu_timer.Start();
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
printf("CPU BFS finished in %lf msec.\n", elapsed);
}
/**
* @brief Run SALSA tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] inv_graph Reference to the inversed CSR graph we process on
* @param[in] max_iter Max iteration for Page Rank computing
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] max_queue_sizing Max queue sizing for frontier
* @param[in] context CudaContext for moderngpu to use
*
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
const Csr<VertexId, Value, SizeT> &inv_graph,
SizeT max_iter,
int max_grid_size,
int num_gpus,
double max_queue_sizing,
CudaContext& context)
{
typedef SALSAProblem<
VertexId,
SizeT,
Value> Problem;
// Allocate host-side label array (for both reference and gpu-computed results)
Value *reference_hrank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *reference_arank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *h_hrank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *h_arank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *reference_check_h = (g_quick) ? NULL : reference_hrank;
Value *reference_check_a = (g_quick) ? NULL : reference_arank;
// Allocate BFS enactor map
SALSAEnactor<INSTRUMENT> salsa_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
inv_graph,
num_gpus),
"Problem SALSA Initialization Failed", __FILE__, __LINE__);
//
// Compute reference CPU SALSA solution for source-distance
//
if (reference_check_h != NULL)
{
printf("compute ref value\n");
SimpleReferenceSALSA(
graph,
inv_graph,
reference_check_h,
reference_check_a,
max_iter);
printf("\n");
}
Stats *stats = new Stats("GPU SALSA");
long long total_queued = 0;
double avg_duty = 0.0;
// Perform BFS
GpuTimer gpu_timer;
util::GRError(
csr_problem->Reset(salsa_enactor.GetFrontierType(), 200.0),
"SALSA Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(
salsa_enactor.template Enact<Problem>(
context, csr_problem, max_iter, max_grid_size),
"SALSA Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
salsa_enactor.GetStatistics(total_queued, avg_duty);
double elapsed = gpu_timer.ElapsedMillis();
// Copy out results
util::GRError(
csr_problem->Extract(h_hrank, h_arank),
"SALSA Problem Data Extraction Failed", __FILE__, __LINE__);
// Verify the result
if (reference_check_a != NULL)
{
printf("Validity: ");
CompareResults(h_hrank, reference_check_h, graph.nodes, true);
CompareResults(h_arank, reference_check_a, graph.nodes, true);
}
printf("\nFirst 40 labels of the GPU result.");
// Display Solution
DisplaySolution(h_hrank, h_arank, graph.nodes);
DisplayStats(
*stats,
elapsed,
avg_duty);
// Cleanup
delete stats;
if (csr_problem) delete csr_problem;
if (reference_check_h) free(reference_check_h);
if (reference_check_a) free(reference_check_a);
if (h_hrank) free(h_hrank);
if (h_arank) free(h_arank);
cudaDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] inv_graph Reference to the inversed CSR graph we process on
* @param[in] args Reference to the command line arguments
 * @param[in] context CudaContext for moderngpu library
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
Csr<VertexId, Value, SizeT> &inv_graph,
CommandLineArgs &args,
CudaContext& context)
{
SizeT max_iter = 20;
bool instrumented = false; // Whether or not to collect instrumentation from kernels
int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
double max_queue_sizing = 1.0;
instrumented = args.CheckCmdLineFlag("instrumented");
args.GetCmdLineArgument("max-iter", max_iter);
g_quick = true; // missing reference now
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("queue-sizing", max_queue_sizing);
if (instrumented)
{
RunTests<VertexId, Value, SizeT, true>(
graph,
inv_graph,
max_iter,
max_grid_size,
num_gpus,
max_queue_sizing,
context);
}
else
{
RunTests<VertexId, Value, SizeT, false>(
graph,
inv_graph,
max_iter,
max_grid_size,
num_gpus,
max_queue_sizing,
context);
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
//DeviceInit(args);
//cudaSetDeviceFlags(cudaDeviceMapHost);
int dev = 0;
args.GetCmdLineArgument("device", dev);
ContextPtr context = mgpu::CreateCudaDevice(dev);
//srand(0); // Presently deterministic
//srand(time(NULL));
// Parse graph-construction params
g_undirected = false;
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier
typedef float Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
Csr<VertexId, Value, SizeT> inv_csr(false);
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0)
{
return 1;
}
if (graphio::BuildMarketGraph<false>(
market_filename,
inv_csr,
g_undirected,
true) != 0)
{
return 1;
}
csr.PrintHistogram();
//csr.DisplayGraph();
//inv_csr.DisplayGraph();
printf("out node:%d, in node:%d\n", csr.out_nodes, inv_csr.out_nodes);
// Run tests
RunTests(csr, inv_csr, args, *context);
}
else
{
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
|
8b9b23c2ec814277cdb911f370ccdee882aadb93.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "util.h"
// host implementation of dot product
double dot_host(const double *x, const double* y, int n) {
double sum = 0;
for(auto i=0; i<n; ++i) {
sum += x[i]*y[i];
}
return sum;
}
// TODO implement dot product kernel
// hint : the result should be a single value in result[0]
__global__
void dot_gpu_kernel(const double *x, const double* y, double *result, int n) {
extern __shared__ double buffer[];
auto i = threadIdx.x;
if(i<n) {
// each thread calculates and stores contribution to the sum
buffer[i] = x[i]*y[i];
// do binary reduction iteratively
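        // note: this tree reduction assumes n is a power of two and that all
        // n partial products fit in a single thread block (dot_gpu below
        // launches one block of n threads); other sizes would drop contributions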
auto m = n>>1;
while(m) {
__syncthreads();
if(i<m) {
buffer[i] += buffer[i+m];
}
m>>=1;
}
// thread 0 writes the result
if(i==0) {
result[0]= buffer[0];
}
}
}
double dot_gpu(const double *x, const double* y, int n) {
static double* result = malloc_device<double>(1);
hipLaunchKernelGGL(( dot_gpu_kernel), dim3(1), dim3(n), n*sizeof(double), 0, x, y, result, n);
double r;
copy_to_host<double>(result, &r, 1);
return r;
}
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 4);
size_t n = (1 << pow);
auto size_in_bytes = n * sizeof(double);
std::cout << "dot product CUDA of length n = " << n
<< " : " << size_in_bytes/(1024.*1024.) << "MB"
<< std::endl;
hipInit(0);
auto x_h = malloc_host<double>(n, 2.);
auto y_h = malloc_host<double>(n);
for(auto i=0; i<n; ++i) {
y_h[i] = rand()%10;
}
auto x_d = malloc_device<double>(n);
auto y_d = malloc_device<double>(n);
// copy initial conditions to device
copy_to_device<double>(x_h, x_d, n);
copy_to_device<double>(y_h, y_d, n);
auto result = dot_gpu(x_d, y_d, n);
auto expected = dot_host(x_h, y_h, n);
std::cout << "expected " << expected << " got " << result << std::endl;
return 0;
}
|
8b9b23c2ec814277cdb911f370ccdee882aadb93.cu
|
#include <iostream>
#include <cuda.h>
#include "util.h"
// host implementation of dot product
double dot_host(const double *x, const double* y, int n) {
double sum = 0;
for(auto i=0; i<n; ++i) {
sum += x[i]*y[i];
}
return sum;
}
// TODO implement dot product kernel
// hint : the result should be a single value in result[0]
__global__
void dot_gpu_kernel(const double *x, const double* y, double *result, int n) {
extern __shared__ double buffer[];
auto i = threadIdx.x;
if(i<n) {
// each thread calculates and stores contribution to the sum
buffer[i] = x[i]*y[i];
// do binary reduction iteratively
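        // note: this tree reduction assumes n is a power of two and that all
        // n partial products fit in a single thread block (dot_gpu below
        // launches one block of n threads); other sizes would drop contributions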
auto m = n>>1;
while(m) {
__syncthreads();
if(i<m) {
buffer[i] += buffer[i+m];
}
m>>=1;
}
// thread 0 writes the result
if(i==0) {
result[0]= buffer[0];
}
}
}
double dot_gpu(const double *x, const double* y, int n) {
static double* result = malloc_device<double>(1);
dot_gpu_kernel<<<1, n, n*sizeof(double)>>>(x, y, result, n);
double r;
copy_to_host<double>(result, &r, 1);
return r;
}
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 4);
size_t n = (1 << pow);
auto size_in_bytes = n * sizeof(double);
std::cout << "dot product CUDA of length n = " << n
<< " : " << size_in_bytes/(1024.*1024.) << "MB"
<< std::endl;
cuInit(0);
auto x_h = malloc_host<double>(n, 2.);
auto y_h = malloc_host<double>(n);
for(auto i=0; i<n; ++i) {
y_h[i] = rand()%10;
}
auto x_d = malloc_device<double>(n);
auto y_d = malloc_device<double>(n);
// copy initial conditions to device
copy_to_device<double>(x_h, x_d, n);
copy_to_device<double>(y_h, y_d, n);
auto result = dot_gpu(x_d, y_d, n);
auto expected = dot_host(x_h, y_h, n);
std::cout << "expected " << expected << " got " << result << std::endl;
return 0;
}
|
0e122d949892bd836a2ea30a2553b0772ba80567.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
/* #define CHECK(call){
const hipError_t error = call;
if(error != hipSuccess){
printf("Error: %s:%d, \n", __FILE__, __LINE__);
printf("Code: %d, reason: %s \n", error, hipGetErrorString(error));
}
} */
void checkResult(float *hostRef, float *gpuRef, const int N){
double epsilon = 1.0E-8;
bool match = 1;
for(int i = 0; i<N; i++){
        if(fabs(hostRef[i] - gpuRef[i]) > epsilon){
            match = 0;
            printf("Arrays do not match!\n");
            printf("Host %5.2f GPU %5.2f at element %d\n", hostRef[i], gpuRef[i], i);
break;
}
}
if(match) printf("Arrays match. \n \n");
}
void initialData(float* ip, int size){
time_t t;
srand((unsigned int)time(&t));
for(int i = 0; i<size; i++){
ip[i] = (float)(rand() & 0xFF)/10.0f;
}
}
void sumArrayOnHost(float* A, float* B, float* C, const int N){
for(int idx = 0; idx<N; idx++){
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArrayOnGPU(float *A, float *B, float *C){
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
int main(int argc, char **argv){
printf("%s Starting... \n", argv[0]);
// set up device;
int dev = 0;
hipSetDevice(dev);
// set up data size of vectors
int nElem = 32;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// malloc device global memory
float *d_A, *d_B, *d_C;
hipMalloc((float **)&d_A, nBytes);
hipMalloc((float **)&d_B, nBytes);
hipMalloc((float **)&d_C, nBytes);
// transfer data from host to device
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
// invoke kernel at host side
dim3 block(nElem);
dim3 grid(nElem/block.x);
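    // launch configuration: one thread per element with nElem threads per block,
    // so the grid collapses to a single block here (assumes nElem fits in one
    // block and divides evenly)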
hipLaunchKernelGGL(( sumArrayOnGPU), dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C);
printf("Execution configuration <<< %d, %d >>>\n", grid.x, block.x);
// copy kernel result back to host side
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// add vector at host side for result checks
sumArrayOnHost(h_A, h_B, hostRef, nElem);
// check device result
checkResult(hostRef, gpuRef, nElem);
// free device global memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return 0;
}
|
0e122d949892bd836a2ea30a2553b0772ba80567.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
/* #define CHECK(call){
const cudaError_t error = call;
if(error != cudaSuccess){
printf("Error: %s:%d, \n", __FILE__, __LINE__);
printf("Code: %d, reason: %s \n", error, cudaGetErrorString(error));
}
} */
void checkResult(float *hostRef, float *gpuRef, const int N){
double epsilon = 1.0E-8;
bool match = 1;
for(int i = 0; i<N; i++){
        if(fabs(hostRef[i] - gpuRef[i]) > epsilon){
            match = 0;
            printf("Arrays do not match!\n");
            printf("Host %5.2f GPU %5.2f at element %d\n", hostRef[i], gpuRef[i], i);
break;
}
}
if(match) printf("Arrays match. \n \n");
}
void initialData(float* ip, int size){
time_t t;
srand((unsigned int)time(&t));
for(int i = 0; i<size; i++){
ip[i] = (float)(rand() & 0xFF)/10.0f;
}
}
void sumArrayOnHost(float* A, float* B, float* C, const int N){
for(int idx = 0; idx<N; idx++){
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArrayOnGPU(float *A, float *B, float *C){
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
int main(int argc, char **argv){
printf("%s Starting... \n", argv[0]);
// set up device;
int dev = 0;
cudaSetDevice(dev);
// set up data size of vectors
int nElem = 32;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// malloc device global memory
float *d_A, *d_B, *d_C;
cudaMalloc((float **)&d_A, nBytes);
cudaMalloc((float **)&d_B, nBytes);
cudaMalloc((float **)&d_C, nBytes);
// transfer data from host to device
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
// invoke kernel at host side
dim3 block(nElem);
dim3 grid(nElem/block.x);
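    // launch configuration: one thread per element with nElem threads per block,
    // so the grid collapses to a single block here (assumes nElem fits in one
    // block and divides evenly)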
sumArrayOnGPU<<< grid, block >>>(d_A, d_B, d_C);
printf("Execution configuration <<< %d, %d >>>\n", grid.x, block.x);
// copy kernel result back to host side
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// add vector at host side for result checks
sumArrayOnHost(h_A, h_B, hostRef, nElem);
// check device result
checkResult(hostRef, gpuRef, nElem);
// free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return 0;
}
|
52b609aed831ac1b133d4ba2e03210ba71616d57.hip
|
// !!! This is a file automatically generated by hipify!!!
/* =========================================================================
Copyright (c) 2010-2014, Institute for Microelectronics,
Institute for Analysis and Scientific Computing,
TU Wien.
Portions of this software are copyright by UChicago Argonne, LLC.
-----------------
ViennaCL - The Vienna Computing Library
-----------------
Project Head: Karl Rupp [email protected]
(A list of authors and contributors can be found in the PDF manual)
License: MIT (X11), see file LICENSE in the base directory
============================================================================= */
/*
*
* Tutorial: Use ViennaCL with user-provided CUDA buffers
*
*/
//
// include necessary system headers
//
#include <iostream>
#include <cstdlib>
#include <string>
#include <hip/hip_runtime.h>
//
// ViennaCL includes
//
#include "viennacl/vector.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/linalg/matrix_operations.hpp"
#include "viennacl/linalg/norm_2.hpp"
#include "viennacl/linalg/prod.hpp"
// Some helper functions for this tutorial:
#include "Random.hpp"
//
// A simple CUDA kernel for the vector operation x += y
//
template<typename T>
__global__ void my_inplace_add_kernel(T * vec1, T * vec2, unsigned int size)
{
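  // grid-stride loop: each thread handles elements i, i + gridDim.x*blockDim.x, ...
  // so the launch configuration does not have to match the vector length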
for (unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
i < size;
i += gridDim.x * blockDim.x)
vec1[i] += vec2[i];
}
int main()
{
typedef float ScalarType;
//
// Part 1: Allocate some CUDA memory
//
std::size_t size = 10;
ScalarType *cuda_x;
ScalarType *cuda_y;
hipMalloc(&cuda_x, size * sizeof(ScalarType));
hipMalloc(&cuda_y, size * sizeof(ScalarType));
// Initialize with data
std::vector<ScalarType> host_x(size, 1.0);
std::vector<ScalarType> host_y(size, 2.0);
hipMemcpy(cuda_x, &(host_x[0]), size * sizeof(ScalarType), hipMemcpyHostToDevice);
hipMemcpy(cuda_y, &(host_y[0]), size * sizeof(ScalarType), hipMemcpyHostToDevice);
// run kernel
hipLaunchKernelGGL(( my_inplace_add_kernel), dim3(128), dim3(128), 0, 0, cuda_x, cuda_y, static_cast<unsigned int>(size));
// copy result back
std::vector<ScalarType> result_cuda(size);
hipMemcpy(&(result_cuda[0]), cuda_x, size * sizeof(ScalarType), hipMemcpyDeviceToHost);
std::cout << "Result with CUDA (native): ";
for (std::size_t i=0; i<size; ++i)
std::cout << result_cuda[i] << " ";
std::cout << std::endl;
//
// Part 2: Now do the same within ViennaCL
//
// wrap the existing CUDA buffers inside ViennaCL vectors
viennacl::vector<ScalarType> vcl_vec1(cuda_x, viennacl::CUDA_MEMORY, size); // Second parameter specifies that this is CUDA memory rather than host memory
viennacl::vector<ScalarType> vcl_vec2(cuda_y, viennacl::CUDA_MEMORY, size); // Second parameter specifies that this is CUDA memory rather than host memory
// reset values to 0 and 1, respectively
vcl_vec1 = viennacl::scalar_vector<ScalarType>(size, ScalarType(1.0));
vcl_vec2 = viennacl::scalar_vector<ScalarType>(size, ScalarType(2.0));
vcl_vec1 += vcl_vec2;
std::cout << "Result with ViennaCL: " << vcl_vec1 << std::endl;
// ViennaCL does not automatically free your buffers (you're still the owner), so don't forget to clean up :-)
hipFree(cuda_x);
hipFree(cuda_y);
//
// That's it.
//
std::cout << "!!!! TUTORIAL COMPLETED SUCCESSFULLY !!!!" << std::endl;
return EXIT_SUCCESS;
}
|
52b609aed831ac1b133d4ba2e03210ba71616d57.cu
|
/* =========================================================================
Copyright (c) 2010-2014, Institute for Microelectronics,
Institute for Analysis and Scientific Computing,
TU Wien.
Portions of this software are copyright by UChicago Argonne, LLC.
-----------------
ViennaCL - The Vienna Computing Library
-----------------
Project Head: Karl Rupp [email protected]
(A list of authors and contributors can be found in the PDF manual)
License: MIT (X11), see file LICENSE in the base directory
============================================================================= */
/*
*
* Tutorial: Use ViennaCL with user-provided CUDA buffers
*
*/
//
// include necessary system headers
//
#include <iostream>
#include <cstdlib>
#include <string>
#include <cuda.h>
//
// ViennaCL includes
//
#include "viennacl/vector.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/linalg/matrix_operations.hpp"
#include "viennacl/linalg/norm_2.hpp"
#include "viennacl/linalg/prod.hpp"
// Some helper functions for this tutorial:
#include "Random.hpp"
//
// A simple CUDA kernel for the vector operation x += y
//
template<typename T>
__global__ void my_inplace_add_kernel(T * vec1, T * vec2, unsigned int size)
{
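  // grid-stride loop: each thread handles elements i, i + gridDim.x*blockDim.x, ...
  // so the launch configuration does not have to match the vector length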
for (unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
i < size;
i += gridDim.x * blockDim.x)
vec1[i] += vec2[i];
}
int main()
{
typedef float ScalarType;
//
// Part 1: Allocate some CUDA memory
//
std::size_t size = 10;
ScalarType *cuda_x;
ScalarType *cuda_y;
cudaMalloc(&cuda_x, size * sizeof(ScalarType));
cudaMalloc(&cuda_y, size * sizeof(ScalarType));
// Initialize with data
std::vector<ScalarType> host_x(size, 1.0);
std::vector<ScalarType> host_y(size, 2.0);
cudaMemcpy(cuda_x, &(host_x[0]), size * sizeof(ScalarType), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_y, &(host_y[0]), size * sizeof(ScalarType), cudaMemcpyHostToDevice);
// run kernel
my_inplace_add_kernel<<<128, 128>>>(cuda_x, cuda_y, static_cast<unsigned int>(size));
// copy result back
std::vector<ScalarType> result_cuda(size);
cudaMemcpy(&(result_cuda[0]), cuda_x, size * sizeof(ScalarType), cudaMemcpyDeviceToHost);
std::cout << "Result with CUDA (native): ";
for (std::size_t i=0; i<size; ++i)
std::cout << result_cuda[i] << " ";
std::cout << std::endl;
//
// Part 2: Now do the same within ViennaCL
//
// wrap the existing CUDA buffers inside ViennaCL vectors
viennacl::vector<ScalarType> vcl_vec1(cuda_x, viennacl::CUDA_MEMORY, size); // Second parameter specifies that this is CUDA memory rather than host memory
viennacl::vector<ScalarType> vcl_vec2(cuda_y, viennacl::CUDA_MEMORY, size); // Second parameter specifies that this is CUDA memory rather than host memory
// reset values to 0 and 1, respectively
vcl_vec1 = viennacl::scalar_vector<ScalarType>(size, ScalarType(1.0));
vcl_vec2 = viennacl::scalar_vector<ScalarType>(size, ScalarType(2.0));
vcl_vec1 += vcl_vec2;
std::cout << "Result with ViennaCL: " << vcl_vec1 << std::endl;
// ViennaCL does not automatically free your buffers (you're still the owner), so don't forget to clean up :-)
cudaFree(cuda_x);
cudaFree(cuda_y);
//
// That's it.
//
std::cout << "!!!! TUTORIAL COMPLETED SUCCESSFULLY !!!!" << std::endl;
return EXIT_SUCCESS;
}
|
968357c23dc94df76fe57ebe37a03964020143b2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/core/Array.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/hip/Loops.cuh>
#include <THH/THHTensorInfo.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
namespace at { namespace native {
static constexpr int launch_bound2 = 4;
static constexpr int launch_size_nd = 128;
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
const cuda::detail::TensorInfo<T, IndexType>& info,
int64_t index,
IndexType size) {
IndexType linearIndex = static_cast<IndexType>(index);
CUDA_KERNEL_ASSERT(linearIndex < size && linearIndex >= -size);
if (linearIndex < 0) {
linearIndex += size;
}
return cuda::detail::IndexToOffset<T, IndexType, Dims>::get(linearIndex, info);
}
template<typename IndexType, typename T>
void dispatchTakePutImpl(const Tensor& input, Tensor& output, const Tensor& index) {
auto inputInfo = cuda::detail::getTensorInfo<T, IndexType>(input);
inputInfo.collapseDims();
auto numel = input.numel();
if (inputInfo.isContiguous()) {
cuda::CUDA_tensor_apply2<T, int64_t>(
output,
index,
[inputInfo, numel] __device__ (
T & out, const int64_t& idx) {
auto offset = indexToOffset<-2, T, IndexType>(inputInfo, idx, numel);
out = inputInfo.data[offset];
});
} else {
cuda::CUDA_tensor_apply2<T, int64_t>(
output,
index,
[inputInfo, numel] __device__ (
T & out, const int64_t& idx) {
auto offset = indexToOffset<-1, T, IndexType>(inputInfo, idx, numel);
out = inputInfo.data[offset];
});
}
}
template<typename T>
void dispatchTakePut(const Tensor& input, Tensor& output, const Tensor& index) {
if (cuda::detail::canUse32BitIndexMath(input)) {
dispatchTakePutImpl<int32_t, T>(input, output, index);
} else {
dispatchTakePutImpl<int64_t, T>(input, output, index);
}
}
template<int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, launch_bound2)
__global__ void index_elementwise_kernel(int N, func_t f) {
int tid = threadIdx.x;
int nv = nt * vt;
int idx = nv * blockIdx.x + tid;
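  // each thread processes up to vt elements, strided by nt threads within the
  // block, so one block covers nv = nt*vt consecutive indices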
#pragma unroll
for (int i = 0; i < vt; i++) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
template<int nt, int vt, typename func_t>
static void launch_kernel(int64_t N, const func_t& f) {
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( index_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename func_t>
void gpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) {
int num_indices = index_size.size();
AT_ASSERT(num_indices == index_stride.size());
AT_ASSERT(num_indices == iter.ntensors() - 2);
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
gpu_index_kernel(sub_iter, index_size, index_stride, f);
}
return;
}
auto sizes = at::detail::Array<int64_t, 25>(0);
auto strides = at::detail::Array<int64_t, 25>(0);
auto index_ptrs = at::detail::Array<char*, 25>(nullptr);
for (int i = 0; i < num_indices; i++) {
sizes[i] = index_size[i];
strides[i] = index_stride[i];
index_ptrs[i] = (char*)iter.data_ptr(i + 2);
}
char* out_ptr = (char*)iter.data_ptr(0);
char* in_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<3>(iter);
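  // offsets[0], offsets[1] and offsets[2] are the byte offsets of the output,
  // the input, and the (commonly-strided) index tensors for the current element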
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) {
auto offsets = offset_calc.get(idx);
char* out_data = out_ptr + offsets[0];
char* in_data = in_ptr + offsets[1];
int64_t offset = 0;
#pragma unroll
for (int i = 0; i < num_indices; i++) {
int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]);
CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds");
if (index < 0) {
index += sizes[i];
}
offset += index * strides[i];
}
f(out_data, in_data, offset);
});
}
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
template <typename scalar_t>
void index_fill_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
scalar_t fill_val) {
if (0 == iter.numel()) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
auto offset_calc = make_offset_calculator<2>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds");
if (idx < 0) {
idx += self_dim_size;
}
self_data[idx * self_dim_stride] = fill_val;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
template <typename scalar_t>
void index_copy_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
auto offset_calc = make_offset_calculator<3>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
auto* __restrict__ source_data = reinterpret_cast<scalar_t*>(source_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds");
self_data[idx * self_dim_stride] = *source_data;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
template <typename scalar_t>
void index_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
*(scalar_t*)out_data = *(scalar_t*)(in_data + offset);
});
}
template <typename scalar_t>
void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
*(scalar_t*)(out_data + offset) = *(scalar_t*)in_data;
});
}
static void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_kernel_impl<dtype>(iter, index_size, index_stride);
});
}
static void index_fill_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
const Scalar& source) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(), "index_fill_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto fill_val = source.to<scalar_t>();
auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val);
index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque);
});
}
static void index_copy_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(), "index_copy_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride);
});
}
static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) {
TORCH_CHECK(!accumulate, "index_put does not support accumulate=true");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_put", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_put_kernel_impl<dtype>(iter, index_size, index_stride);
});
}
static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self, const Tensor & mask) {
NoNamesGuard guard;
TORCH_CHECK(mask.scalar_type() == ScalarType::Byte || mask.scalar_type() == ScalarType::Bool,
"masked_select: expected BoolTensor or ByteTensor for mask");
TORCH_CHECK(self.scalar_type() == result.scalar_type(),
"masked_select(): self and result must have the same scalar type");
Tensor _mask = (mask.dim() == 0) ? mask.unsqueeze(0) : mask;
Tensor _self = (self.dim() == 0) ? self.unsqueeze(0) : self;
std::tie(_mask, _self) = expand_outplace(_mask, _self);
at::native::index_out(result, _self, c10::List<c10::optional<at::Tensor>>({_mask}));
return result;
}
Tensor masked_select_cuda(const Tensor & self, const Tensor & mask) {
namedinference::compute_broadcast_outnames(self, mask);
Tensor result = at::empty({0}, self.options());
return masked_select_out_cuda_impl(result, self, mask);
}
Tensor & masked_select_out_cuda(Tensor & result, const Tensor & self, const Tensor & mask) {
namedinference::compute_broadcast_outnames(self, mask);
return masked_select_out_cuda_impl(result, self, mask);
}
void take_out_cuda_template(Tensor& output, const Tensor& input, const Tensor& index) {
TORCH_CHECK(output.device().type() == at::kCUDA, "device type of output (", output.device().type(), ") is not GPU");
TORCH_CHECK(input.device().type() == at::kCUDA, "device type of input (", input.device().type(), ") is not GPU");
TORCH_CHECK(index.device().type() == at::kCUDA, "device type of index (", index.device().type(), ") is not GPU");
TORCH_CHECK(output.layout() == Layout::Strided, "take() only supports strided layout, got layout: ", output.layout(), " on output tensor");
TORCH_CHECK(input.layout() == Layout::Strided, "take() only supports strided layout, got layout: ", input.layout(), " on input tensor");
TORCH_CHECK(index.layout() == Layout::Strided, "take() only supports strided layout, got layout: ", index.layout(), " on index tensor");
TORCH_CHECK(output.scalar_type() == input.scalar_type(),
"output and input scalar type must match. but got different types: ", output.scalar_type(), " and ", input.scalar_type());
TORCH_CHECK(index.scalar_type() == kLong, "index must be an int64 tensor");
TensorArg output_arg{ output, "output", 1 };
TensorArg input_arg{ input, "input", 2 };
TensorArg index_arg{ index, "index", 3 };
checkAllSameGPU("take", {output_arg, input_arg, index_arg});
TORCH_CHECK(input.dim() < MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(output.dim() < MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(index.dim() < MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(!(input.numel() == 0 && index.numel() != 0), "tried to take from an empty tensor");
at::assert_no_internal_overlap(output);
at::assert_no_partial_overlap(output, index);
at::assert_no_overlap(output, input);
output.resize_(index.sizes());
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half, input.scalar_type(), "take_cuda", [&] {
dispatchTakePut<scalar_t>(input, output, index);
});
}
Tensor take_cuda(const Tensor& self, const Tensor& index) {
auto out = at::empty(index.sizes(), self.options());
take_out_cuda_template(out, self, index);
return out;
}
Tensor& take_out_cuda(Tensor& out, const Tensor& self, const Tensor& index) {
take_out_cuda_template(out, self, index);
return out;
}
namespace {
template <typename mask_t>
void masked_scatter_cuda_impl(Tensor& self, const Tensor& mask, const Tensor& source){
auto srcSize = source.numel();
// Determine our output size
auto totalElements = mask.sum().item<int64_t>();
// The number of `1` elements present in the mask must be <= the
// number of elements available in `src`
  TORCH_CHECK(totalElements <= srcSize, "source nElements must be >= number of mask `1` elements");
auto mask_cont = mask.contiguous();
// Use a prefix sum to determine the output locations of the masked elements
auto maskPrefixSum = at::empty_like(mask, mask.options().dtype(kLong));
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
thrust::device_ptr<mask_t> maskData(mask_cont.data_ptr<mask_t>());
thrust::device_ptr<int64_t> maskPrefixSumData(
maskPrefixSum.data_ptr<int64_t>());
// Reference for using static_cast on `init_value`:
// https://github.com/NVIDIA/thrust/issues/1379
thrust::exclusive_scan(
thrust::hip::par(allocator).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
maskData,
maskData + mask_cont.numel(),
maskPrefixSumData,
static_cast<int64_t>(0));
// We are getting elements from `src` based on an offset from
// `maskPrefixSum`, so that should be made contiguous too
auto source_contig = source.contiguous();
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(mask_cont)
.add_input(maskPrefixSum)
.build();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
ScalarType::Bool,
ScalarType::BFloat16,
ScalarType::Half,
self.scalar_type(),
"masked_scatter_",
[&]() {
auto source_ptr = source_contig.data_ptr<scalar_t>();
gpu_kernel(
iter, [=] GPU_LAMBDA(scalar_t a, mask_t mask, int64_t maskPrefixSum) -> scalar_t {
if (mask) {
return source_ptr[maskPrefixSum];
}
return a;
});
hipGetLastError();
});
}
} // anonymous namespace
Tensor & masked_scatter__cuda(Tensor& self, const Tensor& mask, const Tensor& source) {
at::assert_no_internal_overlap(self);
TORCH_CHECK(
self.scalar_type() == source.scalar_type(),
"masked_scatter: expected self and source to have same dtypes but got",
self.scalar_type(),
" and ",
source.scalar_type());
TensorArg self_arg{self, "self", 1};
TensorArg mask_arg{mask, "mask", 2};
TensorArg source_arg{source, "source", 3};
checkAllSameGPU("masked_scatter_", {self_arg, mask_arg, source_arg});
Tensor b_mask;
std::tie(b_mask) = expand_inplace(self, mask, "masked_scatter_");
if (b_mask.dtype() == ScalarType::Byte) {
TORCH_WARN("masked_scatter_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
}
auto mask_dtype = b_mask.scalar_type();
if (mask_dtype == ScalarType::Bool) {
masked_scatter_cuda_impl<bool>(self, b_mask, source);
} else {
masked_scatter_cuda_impl<uint8_t>(self, b_mask, source);
}
return self;
}
REGISTER_DISPATCH(index_stub, &index_kernel);
REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel);
REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel);
REGISTER_DISPATCH(index_put_stub, &index_put_kernel);
}} // namespace at::native
|
968357c23dc94df76fe57ebe37a03964020143b2.cu
|
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/core/Array.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/cuda/Loops.cuh>
#include <THC/THCTensorInfo.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
namespace at { namespace native {
static constexpr int launch_bound2 = 4;
static constexpr int launch_size_nd = 128;
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
const cuda::detail::TensorInfo<T, IndexType>& info,
int64_t index,
IndexType size) {
IndexType linearIndex = static_cast<IndexType>(index);
CUDA_KERNEL_ASSERT(linearIndex < size && linearIndex >= -size);
if (linearIndex < 0) {
linearIndex += size;
}
return cuda::detail::IndexToOffset<T, IndexType, Dims>::get(linearIndex, info);
}
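// Editor's note: illustrative sketch, not part of the original file. A worked example of
// the wrap-around above, assuming a contiguous 1-D tensor of 10 elements: an index of -1
// passes the assert (-1 >= -10), is shifted to 9, and IndexToOffset then yields element
// offset 9; an index of 10 or -11 would trip CUDA_KERNEL_ASSERT instead.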
template<typename IndexType, typename T>
void dispatchTakePutImpl(const Tensor& input, Tensor& output, const Tensor& index) {
auto inputInfo = cuda::detail::getTensorInfo<T, IndexType>(input);
inputInfo.collapseDims();
auto numel = input.numel();
if (inputInfo.isContiguous()) {
cuda::CUDA_tensor_apply2<T, int64_t>(
output,
index,
[inputInfo, numel] __device__ (
T & out, const int64_t& idx) {
auto offset = indexToOffset<-2, T, IndexType>(inputInfo, idx, numel);
out = inputInfo.data[offset];
});
} else {
cuda::CUDA_tensor_apply2<T, int64_t>(
output,
index,
[inputInfo, numel] __device__ (
T & out, const int64_t& idx) {
auto offset = indexToOffset<-1, T, IndexType>(inputInfo, idx, numel);
out = inputInfo.data[offset];
});
}
}
template<typename T>
void dispatchTakePut(const Tensor& input, Tensor& output, const Tensor& index) {
if (cuda::detail::canUse32BitIndexMath(input)) {
dispatchTakePutImpl<int32_t, T>(input, output, index);
} else {
dispatchTakePutImpl<int64_t, T>(input, output, index);
}
}
template<int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, launch_bound2)
__global__ void index_elementwise_kernel(int N, func_t f) {
int tid = threadIdx.x;
int nv = nt * vt;
int idx = nv * blockIdx.x + tid;
#pragma unroll
for (int i = 0; i < vt; i++) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
template<int nt, int vt, typename func_t>
static void launch_kernel(int64_t N, const func_t& f) {
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
auto stream = at::cuda::getCurrentCUDAStream();
index_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
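// Editor's note: illustrative sketch, not part of the original file. With the values used
// by the callers below (nt = launch_size_nd = 128, vt = launch_bound2 = 4), each block
// covers nv = 512 elements, so a 1000-element iterator launches ceil(1000 / 512) = 2 blocks
// and the tail threads simply fail the idx < N test.
static_assert((1000 + 128 * 4 - 1) / (128 * 4) == 2,
              "example only: 1000 elements fit in two 512-element blocks");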
template <typename func_t>
void gpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) {
int num_indices = index_size.size();
AT_ASSERT(num_indices == index_stride.size());
AT_ASSERT(num_indices == iter.ntensors() - 2);
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
gpu_index_kernel(sub_iter, index_size, index_stride, f);
}
return;
}
auto sizes = at::detail::Array<int64_t, 25>(0);
auto strides = at::detail::Array<int64_t, 25>(0);
auto index_ptrs = at::detail::Array<char*, 25>(nullptr);
for (int i = 0; i < num_indices; i++) {
sizes[i] = index_size[i];
strides[i] = index_stride[i];
index_ptrs[i] = (char*)iter.data_ptr(i + 2);
}
char* out_ptr = (char*)iter.data_ptr(0);
char* in_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<3>(iter);
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) {
auto offsets = offset_calc.get(idx);
char* out_data = out_ptr + offsets[0];
char* in_data = in_ptr + offsets[1];
int64_t offset = 0;
#pragma unroll
for (int i = 0; i < num_indices; i++) {
int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]);
CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds");
if (index < 0) {
index += sizes[i];
}
offset += index * strides[i];
}
f(out_data, in_data, offset);
});
}
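// Editor's note: illustrative sketch, not part of the original file. For two advanced
// indices the loop above composes the offsets linearly: if index tensor 0 holds i0 for a
// dimension with byte stride strides[0] and index tensor 1 holds i1 with byte stride
// strides[1], then f() reads from in_data + i0 * strides[0] + i1 * strides[1], after each
// index has been wrapped into [0, sizes[k]) exactly as in the loop body.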
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
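// Editor's note: illustrative sketch, not part of the original file. Because the dispatch
// below keys only on sizeof(scalar_t), float and int32_t (both 4 bytes) share a single
// index_kernel_impl<OpaqueType<4>> instantiation that just moves raw bytes.
static_assert(sizeof(OpaqueType<sizeof(float)>) == sizeof(float) &&
              alignof(OpaqueType<sizeof(float)>) == alignof(float),
              "example only: OpaqueType<4> stands in for any 4-byte scalar");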
template <typename scalar_t>
void index_fill_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
scalar_t fill_val) {
if (0 == iter.numel()) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
auto offset_calc = make_offset_calculator<2>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds");
if (idx < 0) {
idx += self_dim_size;
}
self_data[idx * self_dim_stride] = fill_val;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
template <typename scalar_t>
void index_copy_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
auto offset_calc = make_offset_calculator<3>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
auto* __restrict__ source_data = reinterpret_cast<scalar_t*>(source_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds");
self_data[idx * self_dim_stride] = *source_data;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
template <typename scalar_t>
void index_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
*(scalar_t*)out_data = *(scalar_t*)(in_data + offset);
});
}
template <typename scalar_t>
void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
*(scalar_t*)(out_data + offset) = *(scalar_t*)in_data;
});
}
static void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_kernel_impl<dtype>(iter, index_size, index_stride);
});
}
static void index_fill_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
const Scalar& source) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(), "index_fill_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto fill_val = source.to<scalar_t>();
auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val);
index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque);
});
}
static void index_copy_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(), "index_copy_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride);
});
}
static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) {
TORCH_CHECK(!accumulate, "index_put does not support accumulate=true");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_put", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_put_kernel_impl<dtype>(iter, index_size, index_stride);
});
}
static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self, const Tensor & mask) {
NoNamesGuard guard;
TORCH_CHECK(mask.scalar_type() == ScalarType::Byte || mask.scalar_type() == ScalarType::Bool,
"masked_select: expected BoolTensor or ByteTensor for mask");
TORCH_CHECK(self.scalar_type() == result.scalar_type(),
"masked_select(): self and result must have the same scalar type");
Tensor _mask = (mask.dim() == 0) ? mask.unsqueeze(0) : mask;
Tensor _self = (self.dim() == 0) ? self.unsqueeze(0) : self;
std::tie(_mask, _self) = expand_outplace(_mask, _self);
at::native::index_out(result, _self, c10::List<c10::optional<at::Tensor>>({_mask}));
return result;
}
Tensor masked_select_cuda(const Tensor & self, const Tensor & mask) {
namedinference::compute_broadcast_outnames(self, mask);
Tensor result = at::empty({0}, self.options());
return masked_select_out_cuda_impl(result, self, mask);
}
Tensor & masked_select_out_cuda(Tensor & result, const Tensor & self, const Tensor & mask) {
namedinference::compute_broadcast_outnames(self, mask);
return masked_select_out_cuda_impl(result, self, mask);
}
void take_out_cuda_template(Tensor& output, const Tensor& input, const Tensor& index) {
TORCH_CHECK(output.device().type() == at::kCUDA, "device type of output (", output.device().type(), ") is not GPU");
TORCH_CHECK(input.device().type() == at::kCUDA, "device type of input (", input.device().type(), ") is not GPU");
TORCH_CHECK(index.device().type() == at::kCUDA, "device type of index (", index.device().type(), ") is not GPU");
TORCH_CHECK(output.layout() == Layout::Strided, "take() only supports strided layout, got layout: ", output.layout(), " on output tensor");
TORCH_CHECK(input.layout() == Layout::Strided, "take() only supports strided layout, got layout: ", input.layout(), " on input tensor");
TORCH_CHECK(index.layout() == Layout::Strided, "take() only supports strided layout, got layout: ", index.layout(), " on index tensor");
TORCH_CHECK(output.scalar_type() == input.scalar_type(),
"output and input scalar type must match. but got different types: ", output.scalar_type(), " and ", input.scalar_type());
TORCH_CHECK(index.scalar_type() == kLong, "index must be an int64 tensor");
TensorArg output_arg{ output, "output", 1 };
TensorArg input_arg{ input, "input", 2 };
TensorArg index_arg{ index, "index", 3 };
checkAllSameGPU("take", {output_arg, input_arg, index_arg});
TORCH_CHECK(input.dim() < MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(output.dim() < MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(index.dim() < MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(!(input.numel() == 0 && index.numel() != 0), "tried to take from an empty tensor");
at::assert_no_internal_overlap(output);
at::assert_no_partial_overlap(output, index);
at::assert_no_overlap(output, input);
output.resize_(index.sizes());
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half, input.scalar_type(), "take_cuda", [&] {
dispatchTakePut<scalar_t>(input, output, index);
});
}
Tensor take_cuda(const Tensor& self, const Tensor& index) {
auto out = at::empty(index.sizes(), self.options());
take_out_cuda_template(out, self, index);
return out;
}
Tensor& take_out_cuda(Tensor& out, const Tensor& self, const Tensor& index) {
take_out_cuda_template(out, self, index);
return out;
}
namespace {
template <typename mask_t>
void masked_scatter_cuda_impl(Tensor& self, const Tensor& mask, const Tensor& source){
auto srcSize = source.numel();
// Determine our output size
auto totalElements = mask.sum().item<int64_t>();
// The number of `1` elements present in the mask must be <= the
// number of elements available in `src`
  TORCH_CHECK(totalElements <= srcSize, "source nElements must be >= number of mask `1` elements");
auto mask_cont = mask.contiguous();
// Use a prefix sum to determine the output locations of the masked elements
auto maskPrefixSum = at::empty_like(mask, mask.options().dtype(kLong));
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
thrust::device_ptr<mask_t> maskData(mask_cont.data_ptr<mask_t>());
thrust::device_ptr<int64_t> maskPrefixSumData(
maskPrefixSum.data_ptr<int64_t>());
// Reference for using static_cast on `init_value`:
// https://github.com/NVIDIA/thrust/issues/1379
thrust::exclusive_scan(
thrust::cuda::par(allocator).on(c10::cuda::getCurrentCUDAStream()),
maskData,
maskData + mask_cont.numel(),
maskPrefixSumData,
static_cast<int64_t>(0));
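  // Editor's note: illustrative sketch, not part of the original file. For a hypothetical
  // mask [1, 0, 1, 1] the exclusive scan writes maskPrefixSum = [0, 1, 1, 2], so the masked
  // positions 0, 2 and 3 pick up source[0], source[1] and source[2] in the gpu_kernel
  // below, while the unmasked position keeps its original value.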
// We are getting elements from `src` based on an offset from
// `maskPrefixSum`, so that should be made contiguous too
auto source_contig = source.contiguous();
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(mask_cont)
.add_input(maskPrefixSum)
.build();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
ScalarType::Bool,
ScalarType::BFloat16,
ScalarType::Half,
self.scalar_type(),
"masked_scatter_",
[&]() {
auto source_ptr = source_contig.data_ptr<scalar_t>();
gpu_kernel(
iter, [=] GPU_LAMBDA(scalar_t a, mask_t mask, int64_t maskPrefixSum) -> scalar_t {
if (mask) {
return source_ptr[maskPrefixSum];
}
return a;
});
cudaGetLastError();
});
}
} // anonymous namespace
Tensor & masked_scatter__cuda(Tensor& self, const Tensor& mask, const Tensor& source) {
at::assert_no_internal_overlap(self);
TORCH_CHECK(
self.scalar_type() == source.scalar_type(),
"masked_scatter: expected self and source to have same dtypes but got",
self.scalar_type(),
" and ",
source.scalar_type());
TensorArg self_arg{self, "self", 1};
TensorArg mask_arg{mask, "mask", 2};
TensorArg source_arg{source, "source", 3};
checkAllSameGPU("masked_scatter_", {self_arg, mask_arg, source_arg});
Tensor b_mask;
std::tie(b_mask) = expand_inplace(self, mask, "masked_scatter_");
if (b_mask.dtype() == ScalarType::Byte) {
TORCH_WARN("masked_scatter_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
}
auto mask_dtype = b_mask.scalar_type();
if (mask_dtype == ScalarType::Bool) {
masked_scatter_cuda_impl<bool>(self, b_mask, source);
} else {
masked_scatter_cuda_impl<uint8_t>(self, b_mask, source);
}
return self;
}
REGISTER_DISPATCH(index_stub, &index_kernel);
REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel);
REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel);
REGISTER_DISPATCH(index_put_stub, &index_put_kernel);
}} // namespace at::native
|
6bb9cccd461636ef82e65c02e05beb99347f9cff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Simple example on using the Unified Memory
https://devblogs.nvidia.com/parallelforall/
To compile
nvcc managedMemoryAdd.cu -o managedMemoryAdd
To profile
nvprof ./managedMemoryAdd
*/
#include <iostream>
#include <stdio.h>
#include <math.h>
// Simple kernel to add elements
__global__ void addSingleThread(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
__global__ void addMoreThreads(int n, float *x, float *y)
{
// Let the kernel calculate which part of the input signal to play with
int index = threadIdx.x;
int stride = blockDim.x;
// Just did this to keep the syntax similar to the previous example
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
__global__ void addGridThreads(int n, float *x, float *y)
{
// Let the kernel calculate which part of the input signal to play with, but
// now also include the grid information
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
// N will be 1 million (1048576)
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
  // In this case the grid is made of one block, and this block has 1 thread
hipLaunchKernelGGL(( addSingleThread), dim3(1), dim3(1), 0, 0, N, x, y);
// Now we have a grid of one block and this block has 256 threads
hipLaunchKernelGGL(( addMoreThreads), dim3(1), dim3(256), 0, 0, N, x, y);
// Now we calculate the grid dimensions
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
printf("GridSize(in blocks):%d BlockSize(in threads):%d\n",numBlocks,blockSize);
hipLaunchKernelGGL(( addGridThreads), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
|
6bb9cccd461636ef82e65c02e05beb99347f9cff.cu
|
/*
Simple example on using the Unified Memory
https://devblogs.nvidia.com/parallelforall/
To compile
nvcc managedMemoryAdd.cu -o managedMemoryAdd
To profile
nvprof ./managedMemoryAdd
*/
#include <iostream>
#include <stdio.h>
#include <math.h>
// Simple kernel to add elements
__global__ void addSingleThread(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
__global__ void addMoreThreads(int n, float *x, float *y)
{
// Let the kernel calculate which part of the input signal to play with
int index = threadIdx.x;
int stride = blockDim.x;
// Just did this to keep the syntax similar to the previous example
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
__global__ void addGridThreads(int n, float *x, float *y)
{
// Let the kernel calculate which part of the input signal to play with, but
// now also include the grid information
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
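// Editor's note: illustrative sketch, not part of the original file. The grid-stride loop
// above covers any N: with the single-block launch below (1 block of 256 threads) each
// thread walks (1<<20) / 256 = 4096 elements, while with numBlocks = 4096 the grid supplies
// exactly 1<<20 threads and each loop body runs once.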
int main(void)
{
// N will be 1 million (1048576)
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
  // In this case the grid is made of one block, and this block has 1 thread
addSingleThread<<<1, 1>>>(N, x, y);
// Now we have a grid of one block and this block has 256 threads
addMoreThreads<<<1, 256>>>(N, x, y);
// Now we calculate the grid dimensions
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
printf("GridSize(in blocks):%d BlockSize(in threads):%d\n",numBlocks,blockSize);
addGridThreads<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
855be888e9824236b5fe9bd9ec361693c5023fbe.hip
|
// !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
#include "device_launch_parameters.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 0.1f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
float *dev_pos;
float *dev_color;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
 * CUDA kernel for generating a random position array for the boids.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
__global__ void kernAddColor(float* dev_color, int N) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
		dev_color[3 * i] = 0.9f;
dev_color[3 * i + 1] = 1.0f;
dev_color[3 * i + 2] = 0.3f;
}
}
__global__ void kernAddColor2(float* dev_color, int N1, int N2) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N2) {
		dev_color[3 * N1 + 3 * i] = 1.0f;
dev_color[3 * N1 + 3 * i + 1] = 0.2f;
dev_color[3 * N1 + 3 * i + 2] = 0.0f;
}
}
void ScanMatching::copyToDevice(int N1, int N2, float* xpoints, float* ypoints) {
hipMemcpy(dev_pos, xpoints, sizeof(float) * 3 * N1, hipMemcpyHostToDevice);
hipMemcpy(dev_pos + (3 * N1), ypoints, sizeof(float) * 3 * N2, hipMemcpyHostToDevice);
}
/**
* Initialize memory, update some globals
*/
void ScanMatching::initSimulation(int N1, int N2, float* xpoints, float* ypoints) {
int N = N1 + N2;
numObjects = N;
hipMalloc((void**)&dev_pos, 3 * N * sizeof(float));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_color, 3 * N * sizeof(float));
checkCUDAErrorWithLine("hipMalloc dev_color failed!");
ScanMatching::copyToDevice(N1, N2, xpoints, ypoints);
dim3 fullBlocksPerGrid1((N1 + blockSize - 1) / blockSize);
kernAddColor << <fullBlocksPerGrid1, blockSize >> > (dev_color, N1);
dim3 fullBlocksPerGrid2((N2 + blockSize - 1) / blockSize);
kernAddColor2 << <fullBlocksPerGrid2, blockSize >> > (dev_color, N1, N2);
hipDeviceSynchronize();
}
float* ScanMatching::getDevPos() {
return dev_pos;
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, float *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[3 * index] * c_scale;
vbo[4 * index + 1] = pos[3 * index + 1] * c_scale;
vbo[4 * index + 2] = pos[3 * index + 2] * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, float *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[3 * index] + 0.3f;
vbo[4 * index + 1] = vel[3 * index + 1] + 0.3f;
vbo[4 * index + 2] = vel[3 * index + 2] + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
 * Wrapper for the kernCopyPositionsToVBO / kernCopyVelocitiesToVBO CUDA kernels.
*/
void ScanMatching::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_color, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
void ScanMatching::endSimulation() {
hipFree(dev_pos);
hipFree(dev_color);
}
|
855be888e9824236b5fe9bd9ec361693c5023fbe.cu
|
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
#include "device_launch_parameters.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 0.1f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
float *dev_pos;
float *dev_color;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
 * CUDA kernel for generating a random position array for the boids.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
__global__ void kernAddColor(float* dev_color, int N) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
		dev_color[3 * i] = 0.9f;
dev_color[3 * i + 1] = 1.0f;
dev_color[3 * i + 2] = 0.3f;
}
}
__global__ void kernAddColor2(float* dev_color, int N1, int N2) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N2) {
		dev_color[3 * N1 + 3 * i] = 1.0f;
dev_color[3 * N1 + 3 * i + 1] = 0.2f;
dev_color[3 * N1 + 3 * i + 2] = 0.0f;
}
}
void ScanMatching::copyToDevice(int N1, int N2, float* xpoints, float* ypoints) {
cudaMemcpy(dev_pos, xpoints, sizeof(float) * 3 * N1, cudaMemcpyHostToDevice);
cudaMemcpy(dev_pos + (3 * N1), ypoints, sizeof(float) * 3 * N2, cudaMemcpyHostToDevice);
}
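// Editor's note: illustrative sketch, not part of the original file. dev_pos is one packed
// buffer: the N1 "x" points come first as xyz float triples, immediately followed by the
// N2 "y" points, which is why the second cudaMemcpy above starts at dev_pos + 3 * N1.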
/**
* Initialize memory, update some globals
*/
void ScanMatching::initSimulation(int N1, int N2, float* xpoints, float* ypoints) {
int N = N1 + N2;
numObjects = N;
cudaMalloc((void**)&dev_pos, 3 * N * sizeof(float));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_color, 3 * N * sizeof(float));
checkCUDAErrorWithLine("cudaMalloc dev_color failed!");
ScanMatching::copyToDevice(N1, N2, xpoints, ypoints);
dim3 fullBlocksPerGrid1((N1 + blockSize - 1) / blockSize);
kernAddColor << <fullBlocksPerGrid1, blockSize >> > (dev_color, N1);
dim3 fullBlocksPerGrid2((N2 + blockSize - 1) / blockSize);
kernAddColor2 << <fullBlocksPerGrid2, blockSize >> > (dev_color, N1, N2);
cudaDeviceSynchronize();
}
float* ScanMatching::getDevPos() {
return dev_pos;
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, float *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[3 * index] * c_scale;
vbo[4 * index + 1] = pos[3 * index + 1] * c_scale;
vbo[4 * index + 2] = pos[3 * index + 2] * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, float *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[3 * index] + 0.3f;
vbo[4 * index + 1] = vel[3 * index + 1] + 0.3f;
vbo[4 * index + 2] = vel[3 * index + 2] + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
 * Wrapper for the kernCopyPositionsToVBO / kernCopyVelocitiesToVBO CUDA kernels.
*/
void ScanMatching::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_color, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
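// Editor's note: descriptive comment, not part of the original file. dev_color is routed
// through the "velocities" VBO path above, so kernCopyVelocitiesToVBO's +0.3f offset is
// applied to the stored RGB values before they reach the shader.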
void ScanMatching::endSimulation() {
cudaFree(dev_pos);
cudaFree(dev_color);
}
|
47f03ebe9b1cc2bbe719449106635f82108c5241.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <THH/THHBlas.h>
#include <THH/THHGeneral.h>
#include <TH/THHalf.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPBlas.h>
#include <algorithm>
#include <mutex>
#ifdef __HIP_PLATFORM_HCC__
#include <hip/hip_version.h>
#endif
/* Level 2 */
void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda)
{
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
// TODO: why does Level3 check trans but this doesn't?
if (n <= 1)
*lda = std::max<int64_t>(m, 1);
}
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Sger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Dger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
hipblasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return HIPBLAS_OP_T;
else if (trans == 'n') return HIPBLAS_OP_N;
else if (trans == 'c') return HIPBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return HIPBLAS_OP_T;
}
}
void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
if(n <= 1)
*ldc = std::max<int64_t>(m, 1);
if(transa_)
{
if(m <= 1)
*lda = std::max<int64_t>(k, 1);
}
else
{
if(k <= 1)
*lda = std::max<int64_t>(m, 1);
}
if(transb_)
{
if(k <= 1)
*ldb = std::max<int64_t>(n, 1);
}
else
{
if(n <= 1)
*ldb = std::max<int64_t>(k, 1);
}
}
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
at::cuda::blas::gemm<float>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
// In CUDA 8.0, definition of data types for sgemmex changed
#if TORCH_HIP_VERSION < 8000
# define HIP_R_16F HIPBLAS_DATA_HALF
#endif
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, at::Half *c, int64_t ldc)
{
at::cuda::blas::gemm<at::Half>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
void THCudaBlas_Bgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, at::BFloat16 *a, int64_t lda, at::BFloat16 *b, int64_t ldb, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc)
{
at::cuda::blas::gemm<at::BFloat16>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
at::cuda::blas::gemm<double>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB,
at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
#ifdef __HIP_PLATFORM_HCC__
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_f16_r, (int)lda, strideA,
b, rocblas_datatype_f16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_f16_r, (int)ldc, strideC,
c, rocblas_datatype_f16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0));
#else
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#endif // TORCH_HIP_VERSION < 11000
THCublasCheck(hipblasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, HIP_R_16F, (int)lda, strideA,
b, HIP_R_16F, (int)ldb, strideB,
(void*)&fBeta, c, HIP_R_16F, (int)ldc, strideC,
(int)batchCount, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // TORCH_HIP_VERSION < 11000
#endif // __HIP_PLATFORM_HCC__
}
void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::BFloat16 alpha, const at::BFloat16 *a, int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB,
at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major < 8) {
TORCH_CHECK(false, "BFloat16 gemm in CUDA requires Ampere or later GPU");
}
THCublasCheck(hipblasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, CUDA_R_16BF, (int)lda, strideA,
b, CUDA_R_16BF, (int)ldb, strideB,
(void*)&fBeta, c, CUDA_R_16BF, (int)ldc, strideC,
(int)batchCount, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#elif defined(__HIP_PLATFORM_HCC__)
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_bf16_r, (int)lda, strideA,
b, rocblas_datatype_bf16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_bf16_r, (int)ldc, strideC,
c, rocblas_datatype_bf16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0, NULL, NULL));
#else
TORCH_CHECK(false, "THCudaBlas_BgemmStridedBatched is only available on TORCH_HIP_VERSION >= 11");
#endif // defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
}
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
float beta, float *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
double beta, double *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
|
47f03ebe9b1cc2bbe719449106635f82108c5241.cu
|
#include <THC/THCBlas.h>
#include <THC/THCGeneral.h>
#include <TH/THHalf.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDABlas.h>
#include <algorithm>
#include <mutex>
#ifdef __HIP_PLATFORM_HCC__
#include <hip/hip_version.h>
#endif
/* Level 2 */
void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda)
{
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
// TODO: why does Level3 check trans but this doesn't?
if (n <= 1)
*lda = std::max<int64_t>(m, 1);
}
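// Editor's note: illustrative sketch, not part of the original file. When the "matrix"
// degenerates to a single column (n <= 1) the leading dimension is never used to step
// between columns, so the only constraint cuBLAS still enforces is lda >= max(m, 1); the
// clamp above repairs callers that pass e.g. lda = 0 for an empty tensor.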
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Sger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Dger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
cublasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return CUBLAS_OP_T;
else if (trans == 'n') return CUBLAS_OP_N;
else if (trans == 'c') return CUBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return CUBLAS_OP_T;
}
}
void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
if(n <= 1)
*ldc = std::max<int64_t>(m, 1);
if(transa_)
{
if(m <= 1)
*lda = std::max<int64_t>(k, 1);
}
else
{
if(k <= 1)
*lda = std::max<int64_t>(m, 1);
}
if(transb_)
{
if(k <= 1)
*ldb = std::max<int64_t>(n, 1);
}
else
{
if(n <= 1)
*ldb = std::max<int64_t>(k, 1);
}
}
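// Editor's note: illustrative sketch, not part of the original file. One concrete case of
// the rules above, assuming transa = transb = 'n', m = 5, n = 1, k = 1 (a matrix-vector
// product expressed as GEMM): n <= 1 bumps ldc to max(m, 1) = 5, k <= 1 bumps lda to
// max(m, 1) = 5, and n <= 1 bumps ldb to max(k, 1) = 1, the minimal leading dimensions
// cuBLAS accepts for these shapes.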
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
at::cuda::blas::gemm<float>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
// In CUDA 8.0, definition of data types for sgemmex changed
#if CUDA_VERSION < 8000
# define CUDA_R_16F CUBLAS_DATA_HALF
#endif
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, at::Half *c, int64_t ldc)
{
at::cuda::blas::gemm<at::Half>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
void THCudaBlas_Bgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, at::BFloat16 *a, int64_t lda, at::BFloat16 *b, int64_t ldb, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc)
{
at::cuda::blas::gemm<at::BFloat16>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
at::cuda::blas::gemm<double>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB,
at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
#ifdef __HIP_PLATFORM_HCC__
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_f16_r, (int)lda, strideA,
b, rocblas_datatype_f16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_f16_r, (int)ldc, strideC,
c, rocblas_datatype_f16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0));
#else
#if defined(CUDA_VERSION) && CUDA_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#endif // CUDA_VERSION < 11000
THCublasCheck(cublasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, CUDA_R_16F, (int)lda, strideA,
b, CUDA_R_16F, (int)ldb, strideB,
(void*)&fBeta, c, CUDA_R_16F, (int)ldc, strideC,
(int)batchCount, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#if defined(CUDA_VERSION) && CUDA_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // CUDA_VERSION < 11000
#endif // __HIP_PLATFORM_HCC__
}
void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::BFloat16 alpha, const at::BFloat16 *a, int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB,
at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major < 8) {
TORCH_CHECK(false, "BFloat16 gemm in CUDA requires Ampere or later GPU");
}
THCublasCheck(cublasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, CUDA_R_16BF, (int)lda, strideA,
b, CUDA_R_16BF, (int)ldb, strideB,
(void*)&fBeta, c, CUDA_R_16BF, (int)ldc, strideC,
(int)batchCount, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#elif defined(__HIP_PLATFORM_HCC__)
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_bf16_r, (int)lda, strideA,
b, rocblas_datatype_bf16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_bf16_r, (int)ldc, strideC,
c, rocblas_datatype_bf16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0, NULL, NULL));
#else
TORCH_CHECK(false, "THCudaBlas_BgemmStridedBatched is only available on CUDA_VERSION >= 11");
#endif // defined(CUDA_VERSION) && CUDA_VERSION >= 11000
}
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
float beta, float *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
double beta, double *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
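// Editor's note: the sketch below is not part of the original file. It only illustrates, under
// the assumption of packed, column-major, non-transposed batches, how the stride arguments of
// the *StridedBatched wrappers above are usually derived; the helper name is hypothetical.
static void example_sgemm_strided_batched(THCState *state,
                                          int64_t m, int64_t n, int64_t k, int64_t batchCount,
                                          const float *A, const float *B, float *C) {
  // Leading dimensions for column-major storage with no transposition.
  int64_t lda = m, ldb = k, ldc = m;
  // Each matrix of the batch is stored directly after the previous one.
  int64_t strideA = lda * k;
  int64_t strideB = ldb * n;
  int64_t strideC = ldc * n;
  // C[i] = 1.0 * A[i] * B[i] + 0.0 * C[i] for every i in [0, batchCount).
  THCudaBlas_SgemmStridedBatched(state, 'n', 'n', m, n, k,
                                 1.0f, A, lda, strideA, B, ldb, strideB,
                                 0.0f, C, ldc, strideC, batchCount);
}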
|
87ba83e30bc5a7207d233b7ee79c2756b6ace20f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// kernels.cu
// diffusion3d-GPU
//
// Created by Manuel Diaz on 7/26/16.
// Copyright © 2016 Manuel Diaz. All rights reserved.
//
extern "C" {
#include "acoustics3d.h"
}
#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)
__constant__ REAL d_kx;
__constant__ REAL d_ky;
__constant__ REAL d_kz;
/*********************************************/
/* A method for checking error in CUDA calls */
/*********************************************/
inline void __checkCuda(hipError_t error, const char *file, const int line)
{
#if defined(DISPL)
if (error != hipSuccess)
{
printf("checkCuda error at %s:%i: %s\n", file, line, hipGetErrorString(hipGetLastError()));
exit(-1);
}
#endif
return;
}
/********************/
/* Laplace Operator */
/********************/
__global__ void Compute_Laplace3d_O2(
REAL * __restrict__ u,
REAL * __restrict__ Lu,
const unsigned int nx,
const unsigned int ny,
const unsigned int nz)
{
unsigned int i, j, k, o, n, s, e, w, t, b, xy;
xy = nx*ny;
i = threadIdx.x + blockIdx.x * blockDim.x;
j = threadIdx.y + blockIdx.y * blockDim.y;
k = threadIdx.z + blockIdx.z * blockDim.z;
o = i+(nx*j)+(xy*k); // node( j,i,k ) n b
n = o+nx; // node(j+1,i,k) | /
s = o-nx; // node(j-1,i,k) |/
e = o+1; // node(j,i+1,k) w---o---e
w = o-1; // node(j,i-1,k) /|
t = o+xy; // node(j,i,k+1) / |
b = o-xy; // node(j,i,k-1) t s
if (i>0 && i<nx-1 && j>0 && j<ny-1 && k>0 && k<nz-1)
Lu[o] = 12*d_kx*(u[e]-2*u[o]+u[w])+
12*d_ky*(u[n]-2*u[o]+u[s])+
12*d_kz*(u[t]-2*u[o]+u[b]);
// else : do nothing!
}
__global__ void Compute_Laplace3d(
REAL * __restrict__ u,
REAL * __restrict__ Lu,
const unsigned int nx,
const unsigned int ny,
const unsigned int nz)
{
unsigned int i, j, k, o, n, s, e, w, t, b, nn, ss, ee, ww, tt, bb, xy;
xy = nx*ny;
i = threadIdx.x + blockIdx.x * blockDim.x;
j = threadIdx.y + blockIdx.y * blockDim.y;
k = threadIdx.z + blockIdx.z * blockDim.z;
o = i+(nx*j)+(xy*k); // node( j,i,k ) nn bb
nn= o+nx+nx; // node(j+2,i,k) | /
n = o+nx; // node(j+1,i,k) | /
s = o-nx; // node(j-1,i,k) n b
ss= o-nx-nx; // node(j-2,i,k) | /
ee= o+2; // node(j,i+2,k) |/
e = o+1; // node(j,i+1,k) ww--w--o--e--ee
w = o-1; // node(j,i-1,k) /|
ww= o-2; // node(j,i-2,k) / |
tt= o+xy+xy; // node(j,i,k+2) t s
t = o+xy; // node(j,i,k+1) / |
b = o-xy; // node(j,i,k-1) / |
bb= o-xy-xy; // node(j,i,k-2) tt ss
if (i>1 && i<nx-2 && j>1 && j<ny-2 && k>1 && k<nz-2)
Lu[o] = d_kx*(-u[ee]+16*u[e]-30*u[o]+16*u[w]-u[ww])+
d_ky*(-u[nn]+16*u[n]-30*u[o]+16*u[s]-u[ss])+
d_kz*(-u[tt]+16*u[t]-30*u[o]+16*u[b]-u[bb]);
// else : do nothing!
}
/***********************/
/* Runge Kutta Methods */ // <==== this is perfectly parallel!
/***********************/
__global__ void Compute_RK(
REAL * __restrict__ u,
const REAL * __restrict__ uo,
const REAL * __restrict__ Lu,
const unsigned int step,
const unsigned int nx,
const unsigned int ny,
const unsigned int nz,
const REAL dt)
{
// Using (o) = [i+nx*j+nx*ny*k] index
unsigned int i, j, k, o, xy;
xy=nx*ny;
// Compute Runge-Kutta step, local thread indexes
i = blockDim.x * blockIdx.x + threadIdx.x;
j = blockDim.y * blockIdx.y + threadIdx.y;
k = blockDim.z * blockIdx.z + threadIdx.z;
// compute single index
o=i+nx*j+xy*k;
// update only internal cells
if (i>1 && i<nx-2 && j>1 && j<ny-2 && k>1 && k<nz-2)
{
switch (step) {
case 1: // step 1
u[o] = uo[o]+dt*(Lu[o]);
break;
case 2: // step 2
u[o] = 0.75*uo[o]+0.25*(u[o]+dt*(Lu[o]));
break;
case 3: // step 3
u[o] = (uo[o]+2*(u[o]+dt*(Lu[o])))/3;
break;
}
}
// else : do nothing!
}
/*********************/
/* Function Wrappers */
/*********************/
extern "C" void CopyToConstantMemory(const REAL kx, const REAL ky, const REAL kz)
{
checkCuda(hipMemcpyToSymbol(d_kx, &kx, sizeof(REAL), 0, hipMemcpyHostToDevice));
checkCuda(hipMemcpyToSymbol(d_ky, &ky, sizeof(REAL), 0, hipMemcpyHostToDevice));
checkCuda(hipMemcpyToSymbol(d_kz, &kz, sizeof(REAL), 0, hipMemcpyHostToDevice));
}
extern "C" void Call_Lu3d(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t aStream,
unsigned int nx, unsigned int ny, unsigned int nz, REAL *u, REAL *Lu)
{
hipLaunchKernelGGL(( Compute_Laplace3d), dim3(numBlocks),dim3(threadsPerBlock),0,aStream, u,Lu,nx,ny,nz);
}
extern "C" void Call_RK3d(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t aStream,
unsigned int step, unsigned int nx, unsigned int ny, unsigned int nz, const REAL dt, REAL *u, REAL *uo, REAL *Lu)
{
hipLaunchKernelGGL(( Compute_RK), dim3(numBlocks),dim3(threadsPerBlock),0,aStream, u,uo,Lu,step,nx,ny,nz,dt);
}
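// Editor's note: the function below is not part of the original file. It is only an
// illustrative host-side sketch of how the wrappers above are typically combined into one
// 3rd-order SSP Runge-Kutta time step; the buffers d_u, d_uo, d_Lu, the launch configuration
// and the function name are assumptions made for the example.
void AdvanceOneStep(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t stream,
                    unsigned int nx, unsigned int ny, unsigned int nz, const REAL dt,
                    REAL *d_u, REAL *d_uo, REAL *d_Lu)
{
  // Keep a copy of u^n; Compute_RK blends every stage against it (the uo argument).
  checkCuda(hipMemcpyAsync(d_uo, d_u, sizeof(REAL)*nx*ny*nz,
                           hipMemcpyDeviceToDevice, stream));
  for (unsigned int step = 1; step <= 3; step++) {
    Call_Lu3d(numBlocks, threadsPerBlock, stream, nx, ny, nz, d_u, d_Lu); // Lu = L(u)
    Call_RK3d(numBlocks, threadsPerBlock, stream, step, nx, ny, nz, dt, d_u, d_uo, d_Lu);
  }
}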
|
87ba83e30bc5a7207d233b7ee79c2756b6ace20f.cu
|
//
// kernels.cu
// diffusion3d-GPU
//
// Created by Manuel Diaz on 7/26/16.
// Copyright © 2016 Manuel Diaz. All rights reserved.
//
extern "C" {
#include "acoustics3d.h"
}
#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)
__constant__ REAL d_kx;
__constant__ REAL d_ky;
__constant__ REAL d_kz;
/*********************************************/
/* A method for checking error in CUDA calls */
/*********************************************/
inline void __checkCuda(cudaError_t error, const char *file, const int line)
{
#if defined(DISPL)
if (error != cudaSuccess)
{
printf("checkCuda error at %s:%i: %s\n", file, line, cudaGetErrorString(cudaGetLastError()));
exit(-1);
}
#endif
return;
}
/********************/
/* Laplace Operator */
/********************/
__global__ void Compute_Laplace3d_O2(
REAL * __restrict__ u,
REAL * __restrict__ Lu,
const unsigned int nx,
const unsigned int ny,
const unsigned int nz)
{
unsigned int i, j, k, o, n, s, e, w, t, b, xy;
xy = nx*ny;
i = threadIdx.x + blockIdx.x * blockDim.x;
j = threadIdx.y + blockIdx.y * blockDim.y;
k = threadIdx.z + blockIdx.z * blockDim.z;
o = i+(nx*j)+(xy*k); // node( j,i,k ) n b
n = o+nx; // node(j+1,i,k) | /
s = o-nx; // node(j-1,i,k) |/
e = o+1; // node(j,i+1,k) w---o---e
w = o-1; // node(j,i-1,k) /|
t = o+xy; // node(j,i,k+1) / |
b = o-xy; // node(j,i,k-1) t s
if (i>0 && i<nx-1 && j>0 && j<ny-1 && k>0 && k<nz-1)
Lu[o] = 12*d_kx*(u[e]-2*u[o]+u[w])+
12*d_ky*(u[n]-2*u[o]+u[s])+
12*d_kz*(u[t]-2*u[o]+u[b]);
// else : do nothing!
}
__global__ void Compute_Laplace3d(
REAL * __restrict__ u,
REAL * __restrict__ Lu,
const unsigned int nx,
const unsigned int ny,
const unsigned int nz)
{
unsigned int i, j, k, o, n, s, e, w, t, b, nn, ss, ee, ww, tt, bb, xy;
xy = nx*ny;
i = threadIdx.x + blockIdx.x * blockDim.x;
j = threadIdx.y + blockIdx.y * blockDim.y;
k = threadIdx.z + blockIdx.z * blockDim.z;
o = i+(nx*j)+(xy*k); // node( j,i,k ) nn bb
nn= o+nx+nx; // node(j+2,i,k) | /
n = o+nx; // node(j+1,i,k) | /
s = o-nx; // node(j-1,i,k) n b
ss= o-nx-nx; // node(j-2,i,k) | /
ee= o+2; // node(j,i+2,k) |/
e = o+1; // node(j,i+1,k) ww--w--o--e--ee
w = o-1; // node(j,i-1,k) /|
ww= o-2; // node(j,i-2,k) / |
tt= o+xy+xy; // node(j,i,k+2) t s
t = o+xy; // node(j,i,k+1) / |
b = o-xy; // node(j,i,k-1) / |
bb= o-xy-xy; // node(j,i,k-2) tt ss
if (i>1 && i<nx-2 && j>1 && j<ny-2 && k>1 && k<nz-2)
Lu[o] = d_kx*(-u[ee]+16*u[e]-30*u[o]+16*u[w]-u[ww])+
d_ky*(-u[nn]+16*u[n]-30*u[o]+16*u[s]-u[ss])+
d_kz*(-u[tt]+16*u[t]-30*u[o]+16*u[b]-u[bb]);
// else : do nothing!
}
/***********************/
/* Runge Kutta Methods */ // <==== this is perfectly parallel!
/***********************/
__global__ void Compute_RK(
REAL * __restrict__ u,
const REAL * __restrict__ uo,
const REAL * __restrict__ Lu,
const unsigned int step,
const unsigned int nx,
const unsigned int ny,
const unsigned int nz,
const REAL dt)
{
// Using (o) = [i+nx*j+nx*ny*k] index
unsigned int i, j, k, o, xy;
xy=nx*ny;
// Compute Runge-Kutta step, local thread indexes
i = blockDim.x * blockIdx.x + threadIdx.x;
j = blockDim.y * blockIdx.y + threadIdx.y;
k = blockDim.z * blockIdx.z + threadIdx.z;
// compute single index
o=i+nx*j+xy*k;
// update only internal cells
if (i>1 && i<nx-2 && j>1 && j<ny-2 && k>1 && k<nz-2)
{
switch (step) {
case 1: // step 1
u[o] = uo[o]+dt*(Lu[o]);
break;
case 2: // step 2
u[o] = 0.75*uo[o]+0.25*(u[o]+dt*(Lu[o]));
break;
case 3: // step 3
u[o] = (uo[o]+2*(u[o]+dt*(Lu[o])))/3;
break;
}
}
// else : do nothing!
}
/*********************/
/* Function Wrappers */
/*********************/
extern "C" void CopyToConstantMemory(const REAL kx, const REAL ky, const REAL kz)
{
checkCuda(cudaMemcpyToSymbol(d_kx, &kx, sizeof(REAL), 0, cudaMemcpyHostToDevice));
checkCuda(cudaMemcpyToSymbol(d_ky, &ky, sizeof(REAL), 0, cudaMemcpyHostToDevice));
checkCuda(cudaMemcpyToSymbol(d_kz, &kz, sizeof(REAL), 0, cudaMemcpyHostToDevice));
}
extern "C" void Call_Lu3d(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream,
unsigned int nx, unsigned int ny, unsigned int nz, REAL *u, REAL *Lu)
{
Compute_Laplace3d<<<numBlocks,threadsPerBlock,0,aStream>>>(u,Lu,nx,ny,nz);
}
extern "C" void Call_RK3d(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream,
unsigned int step, unsigned int nx, unsigned int ny, unsigned int nz, const REAL dt, REAL *u, REAL *uo, REAL *Lu)
{
Compute_RK<<<numBlocks,threadsPerBlock,0,aStream>>>(u,uo,Lu,step,nx,ny,nz,dt);
}
|
cf698988bbfcda7e7f362f44a9af8a20610b397a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "lite/utils/float16.h"
#include <gtest/gtest.h>
#include <bitset>
#include <iostream>
#include <typeindex>
#include "lite/utils/cp_logging.h"
#define ARITHMETIC_KERNEL(op_type, sign) \
__global__ void op_type(const half* in1, const half* in2, half* out) { \
out[0] = in1[0] sign in2[0]; \
}
#define COMPOUND_KERNEL(op_type, sign) \
__global__ void op_type(half* in1, const half* in2) { in1[0] sign in2[0]; }
#define COMPARISON_KERNEL(op_type, sign) \
__global__ void op_type(const half* in1, const half* in2, bool* out) { \
out[0] = in1[0] sign in2[0]; \
}
#define ARITHMETIC_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, float v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2, *out; \
half *d_in1, *d_in2, *d_out; \
int size = sizeof(half); \
hipMalloc(reinterpret_cast<void**>(&d_in1), size); \
hipMalloc(reinterpret_cast<void**>(&d_in2), size); \
hipMalloc(reinterpret_cast<void**>(&d_out), size); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
out = reinterpret_cast<half*>(malloc(size)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); \
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); \
hipLaunchKernelGGL(( op_type), dim3(1), dim3(1), 0, 0, d_in1, d_in2, d_out); \
hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost); \
EXPECT_EQ(static_cast<float>(float16(out[0])), v_out); \
free(in1); \
free(in2); \
free(out); \
hipFree(d_in1); \
hipFree(d_in2); \
hipFree(d_out); \
}
#define COMPOUND_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, float v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2; \
half *d_in1, *d_in2; \
int size = sizeof(half); \
hipMalloc(reinterpret_cast<void**>(&d_in1), size); \
hipMalloc(reinterpret_cast<void**>(&d_in2), size); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); \
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); \
hipLaunchKernelGGL(( op_type), dim3(1), dim3(1), 0, 0, d_in1, d_in2); \
hipMemcpy(in1, d_in1, size, hipMemcpyDeviceToHost); \
EXPECT_EQ(static_cast<float>(float16(in1[0])), v_out); \
free(in1); \
free(in2); \
hipFree(d_in1); \
hipFree(d_in2); \
}
#define COMPARISON_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, bool v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2; \
half *d_in1, *d_in2; \
bool *out, *d_out; \
int size = sizeof(half); \
hipMalloc(reinterpret_cast<void**>(&d_in1), size); \
hipMalloc(reinterpret_cast<void**>(&d_in2), size); \
hipMalloc(reinterpret_cast<void**>(&d_out), 1); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
out = reinterpret_cast<bool*>(malloc(1)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); \
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); \
hipLaunchKernelGGL(( op_type), dim3(1), dim3(1), 0, 0, d_in1, d_in2, d_out); \
hipMemcpy(out, d_out, 1, hipMemcpyDeviceToHost); \
EXPECT_EQ(out[0], v_out); \
free(in1); \
free(in2); \
free(out); \
hipFree(d_in1); \
hipFree(d_in2); \
hipFree(d_out); \
}
#ifdef LITE_CUDA_FP16
namespace paddle {
namespace lite {
#if TORCH_HIP_VERSION < 9000
ARITHMETIC_KERNEL(Add, +)
ARITHMETIC_KERNEL(Sub, -)
ARITHMETIC_KERNEL(Mul, *)
ARITHMETIC_KERNEL(Div, /)
ARITHMETIC_KERNEL_LAUNCH(Add)
ARITHMETIC_KERNEL_LAUNCH(Sub)
ARITHMETIC_KERNEL_LAUNCH(Mul)
ARITHMETIC_KERNEL_LAUNCH(Div)
// Negative sign kernel
__global__ void Neg(half* in) { in[0] = -in[0]; }
void TestNeg(float v_in, float v_out) {
LOG(INFO) << "Test Neg on GPU!";
half *in, *d_in;
int size = sizeof(half);
hipMalloc(reinterpret_cast<void**>(&d_in), size);
in = reinterpret_cast<half*>(malloc(size));
in[0] = half(float16(v_in));
hipMemcpy(d_in, in, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Neg), dim3(1), dim3(1), 0, 0, d_in);
hipMemcpy(in, d_in, size, hipMemcpyDeviceToHost);
EXPECT_EQ(static_cast<float>(float16(in[0])), v_out);
free(in);
hipFree(d_in);
}
COMPOUND_KERNEL(AddAssign, +=)
COMPOUND_KERNEL(SubAssign, -=)
COMPOUND_KERNEL(MulAssign, *=)
COMPOUND_KERNEL(DivAssign, /=)
COMPOUND_KERNEL_LAUNCH(AddAssign)
COMPOUND_KERNEL_LAUNCH(SubAssign)
COMPOUND_KERNEL_LAUNCH(MulAssign)
COMPOUND_KERNEL_LAUNCH(DivAssign)
COMPARISON_KERNEL(Equal, ==)
COMPARISON_KERNEL(NotEqual, !=)
COMPARISON_KERNEL(Less, <)
COMPARISON_KERNEL(LessEqual, <=)
COMPARISON_KERNEL(Greater, >)
COMPARISON_KERNEL(GreaterEqual, >=)
COMPARISON_KERNEL_LAUNCH(Equal)
COMPARISON_KERNEL_LAUNCH(NotEqual)
COMPARISON_KERNEL_LAUNCH(Less)
COMPARISON_KERNEL_LAUNCH(LessEqual)
COMPARISON_KERNEL_LAUNCH(Greater)
COMPARISON_KERNEL_LAUNCH(GreaterEqual)
TEST(float16, arithmetic_on_gpu) {
TestAdd(1, 2, 3);
TestSub(2, 1, 1);
TestMul(2, 3, 6);
TestDiv(6, 2, 3);
TestNeg(1, -1);
}
TEST(float16, compound_on_gpu) {
TestAddAssign(1, 2, 3);
TestSubAssign(2, 1, 1);
TestMulAssign(2, 3, 6);
TestDivAssign(6, 2, 3);
}
TEST(float16, comparison_on_gpu) {
TestEqual(1, 1, true);
TestEqual(1, 2, false);
TestNotEqual(2, 3, true);
TestNotEqual(2, 2, false);
TestLess(3, 4, true);
TestLess(3, 3, false);
TestLessEqual(3, 3, true);
TestLessEqual(3, 2, false);
TestGreater(4, 3, true);
TestGreater(4, 4, false);
TestGreaterEqual(4, 4, true);
TestGreaterEqual(4, 5, false);
}
#endif // TORCH_HIP_VERSION
TEST(float16, conversion_on_gpu) {
// Explicit conversion to and from cuda half
EXPECT_EQ(float16(half(float16(1.0f))).x, 0x3c00);
EXPECT_EQ(float16(half(float16(0.5f))).x, 0x3800);
EXPECT_EQ(float16(half(float16(0.33333f))).x, 0x3555);
EXPECT_EQ(float16(half(float16(0.0f))).x, 0x0000);
EXPECT_EQ(float16(half(float16(-0.0f))).x, 0x8000);
EXPECT_EQ(float16(half(float16(65504.0f))).x, 0x7bff);
EXPECT_EQ(float16(half(float16(65536.0f))).x, 0x7c00);
// Assignment operator
float16 v_assign;
v_assign = half(float16(1.0f));
EXPECT_EQ(v_assign.x, 0x3c00);
}
template <typename T>
struct Functor {
bool operator()(const T& val) {
return std::type_index(typeid(T)) == std::type_index(typeid(float16));
}
};
TEST(float16, typeid) {
// the framework heavily used typeid hash
Functor<float16> functor;
float16 a = float16(.0f);
Functor<int> functor2;
int b(0);
// compile time assert
CHECK_EQ(functor(a), true);
CHECK_EQ(functor2(b), false);
}
// GPU test
TEST(float16, isinf) {
float16 a;
a.x = 0x7c00;
float16 b = float16(INFINITY);
// underflow to 0
float16 native_a(5e-40f);
EXPECT_EQ(std::isinf(a), true);
EXPECT_EQ(std::isinf(b), true);
#ifndef _WIN32
// overflow to inf
float16 native_b(5e40f);
EXPECT_EQ(std::isinf(native_b), true);
#endif
EXPECT_EQ(native_a, float16(0));
}
TEST(float16, isnan) {
float16 a;
a.x = 0x7fff;
float16 b = float16(NAN);
float16 c = float16(5e40);
// inf * +-0 will get a nan
float16 d = c * float16(0);
EXPECT_EQ(std::isnan(a), true);
EXPECT_EQ(std::isnan(b), true);
EXPECT_EQ(std::isnan(d), true);
}
TEST(float16, cast) {
float16 a;
a.x = 0x0070;
auto b = a;
{
// change semantic, keep the same value
float16 c = reinterpret_cast<float16&>(reinterpret_cast<unsigned&>(b));
EXPECT_EQ(b, c);
}
{
// use uint32 low 16 bit store float16
uint32_t c = reinterpret_cast<uint32_t&>(b);
float16 d;
d.x = c;
EXPECT_EQ(b, d);
}
}
} // namespace lite
} // namespace paddle
#endif // LITE_CUDA_FP16
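// Editor's note: the block below is not part of the original file; it only spells out, for one
// operator, what the macro pair ARITHMETIC_KERNEL(Add, +) / ARITHMETIC_KERNEL_LAUNCH(Add) above
// generates, since the token-pasting can be hard to read. The expansion is approximate.
//
// __global__ void Add(const half* in1, const half* in2, half* out) {
//   out[0] = in1[0] + in2[0];
// }
// // TestAdd(1, 2, 3) then allocates one half per operand on host and device, copies the
// // inputs with hipMemcpy, launches Add on a single thread via hipLaunchKernelGGL, copies
// // the result back, and asserts static_cast<float>(float16(out[0])) == 3.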
|
cf698988bbfcda7e7f362f44a9af8a20610b397a.cu
|
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "lite/utils/float16.h"
#include <gtest/gtest.h>
#include <bitset>
#include <iostream>
#include <typeindex>
#include "lite/utils/cp_logging.h"
#define ARITHMETIC_KERNEL(op_type, sign) \
__global__ void op_type(const half* in1, const half* in2, half* out) { \
out[0] = in1[0] sign in2[0]; \
}
#define COMPOUND_KERNEL(op_type, sign) \
__global__ void op_type(half* in1, const half* in2) { in1[0] sign in2[0]; }
#define COMPARISON_KERNEL(op_type, sign) \
__global__ void op_type(const half* in1, const half* in2, bool* out) { \
out[0] = in1[0] sign in2[0]; \
}
#define ARITHMETIC_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, float v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2, *out; \
half *d_in1, *d_in2, *d_out; \
int size = sizeof(half); \
cudaMalloc(reinterpret_cast<void**>(&d_in1), size); \
cudaMalloc(reinterpret_cast<void**>(&d_in2), size); \
cudaMalloc(reinterpret_cast<void**>(&d_out), size); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
out = reinterpret_cast<half*>(malloc(size)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \
cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \
op_type<<<1, 1>>>(d_in1, d_in2, d_out); \
cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); \
EXPECT_EQ(static_cast<float>(float16(out[0])), v_out); \
free(in1); \
free(in2); \
free(out); \
cudaFree(d_in1); \
cudaFree(d_in2); \
cudaFree(d_out); \
}
#define COMPOUND_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, float v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2; \
half *d_in1, *d_in2; \
int size = sizeof(half); \
cudaMalloc(reinterpret_cast<void**>(&d_in1), size); \
cudaMalloc(reinterpret_cast<void**>(&d_in2), size); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \
cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \
op_type<<<1, 1>>>(d_in1, d_in2); \
cudaMemcpy(in1, d_in1, size, cudaMemcpyDeviceToHost); \
EXPECT_EQ(static_cast<float>(float16(in1[0])), v_out); \
free(in1); \
free(in2); \
cudaFree(d_in1); \
cudaFree(d_in2); \
}
#define COMPARISON_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, bool v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2; \
half *d_in1, *d_in2; \
bool *out, *d_out; \
int size = sizeof(half); \
cudaMalloc(reinterpret_cast<void**>(&d_in1), size); \
cudaMalloc(reinterpret_cast<void**>(&d_in2), size); \
cudaMalloc(reinterpret_cast<void**>(&d_out), 1); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
out = reinterpret_cast<bool*>(malloc(1)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \
cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \
op_type<<<1, 1>>>(d_in1, d_in2, d_out); \
cudaMemcpy(out, d_out, 1, cudaMemcpyDeviceToHost); \
EXPECT_EQ(out[0], v_out); \
free(in1); \
free(in2); \
free(out); \
cudaFree(d_in1); \
cudaFree(d_in2); \
cudaFree(d_out); \
}
#ifdef LITE_CUDA_FP16
namespace paddle {
namespace lite {
#if CUDA_VERSION < 9000
ARITHMETIC_KERNEL(Add, +)
ARITHMETIC_KERNEL(Sub, -)
ARITHMETIC_KERNEL(Mul, *)
ARITHMETIC_KERNEL(Div, /)
ARITHMETIC_KERNEL_LAUNCH(Add)
ARITHMETIC_KERNEL_LAUNCH(Sub)
ARITHMETIC_KERNEL_LAUNCH(Mul)
ARITHMETIC_KERNEL_LAUNCH(Div)
// Negative sign kernel
__global__ void Neg(half* in) { in[0] = -in[0]; }
void TestNeg(float v_in, float v_out) {
LOG(INFO) << "Test Neg on GPU!";
half *in, *d_in;
int size = sizeof(half);
cudaMalloc(reinterpret_cast<void**>(&d_in), size);
in = reinterpret_cast<half*>(malloc(size));
in[0] = half(float16(v_in));
cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
Neg<<<1, 1>>>(d_in);
cudaMemcpy(in, d_in, size, cudaMemcpyDeviceToHost);
EXPECT_EQ(static_cast<float>(float16(in[0])), v_out);
free(in);
cudaFree(d_in);
}
COMPOUND_KERNEL(AddAssign, +=)
COMPOUND_KERNEL(SubAssign, -=)
COMPOUND_KERNEL(MulAssign, *=)
COMPOUND_KERNEL(DivAssign, /=)
COMPOUND_KERNEL_LAUNCH(AddAssign)
COMPOUND_KERNEL_LAUNCH(SubAssign)
COMPOUND_KERNEL_LAUNCH(MulAssign)
COMPOUND_KERNEL_LAUNCH(DivAssign)
COMPARISON_KERNEL(Equal, ==)
COMPARISON_KERNEL(NotEqual, !=)
COMPARISON_KERNEL(Less, <)
COMPARISON_KERNEL(LessEqual, <=)
COMPARISON_KERNEL(Greater, >)
COMPARISON_KERNEL(GreaterEqual, >=)
COMPARISON_KERNEL_LAUNCH(Equal)
COMPARISON_KERNEL_LAUNCH(NotEqual)
COMPARISON_KERNEL_LAUNCH(Less)
COMPARISON_KERNEL_LAUNCH(LessEqual)
COMPARISON_KERNEL_LAUNCH(Greater)
COMPARISON_KERNEL_LAUNCH(GreaterEqual)
TEST(float16, arithmetic_on_gpu) {
TestAdd(1, 2, 3);
TestSub(2, 1, 1);
TestMul(2, 3, 6);
TestDiv(6, 2, 3);
TestNeg(1, -1);
}
TEST(float16, compound_on_gpu) {
TestAddAssign(1, 2, 3);
TestSubAssign(2, 1, 1);
TestMulAssign(2, 3, 6);
TestDivAssign(6, 2, 3);
}
TEST(float16, comparison_on_gpu) {
TestEqual(1, 1, true);
TestEqual(1, 2, false);
TestNotEqual(2, 3, true);
TestNotEqual(2, 2, false);
TestLess(3, 4, true);
TestLess(3, 3, false);
TestLessEqual(3, 3, true);
TestLessEqual(3, 2, false);
TestGreater(4, 3, true);
TestGreater(4, 4, false);
TestGreaterEqual(4, 4, true);
TestGreaterEqual(4, 5, false);
}
#endif // CUDA_VERSION
TEST(float16, conversion_on_gpu) {
// Explicit conversion to and from cuda half
EXPECT_EQ(float16(half(float16(1.0f))).x, 0x3c00);
EXPECT_EQ(float16(half(float16(0.5f))).x, 0x3800);
EXPECT_EQ(float16(half(float16(0.33333f))).x, 0x3555);
EXPECT_EQ(float16(half(float16(0.0f))).x, 0x0000);
EXPECT_EQ(float16(half(float16(-0.0f))).x, 0x8000);
EXPECT_EQ(float16(half(float16(65504.0f))).x, 0x7bff);
EXPECT_EQ(float16(half(float16(65536.0f))).x, 0x7c00);
// Assignment operator
float16 v_assign;
v_assign = half(float16(1.0f));
EXPECT_EQ(v_assign.x, 0x3c00);
}
template <typename T>
struct Functor {
bool operator()(const T& val) {
return std::type_index(typeid(T)) == std::type_index(typeid(float16));
}
};
TEST(float16, typeid) {
// the framework heavily used typeid hash
Functor<float16> functor;
float16 a = float16(.0f);
Functor<int> functor2;
int b(0);
// compile time assert
CHECK_EQ(functor(a), true);
CHECK_EQ(functor2(b), false);
}
// GPU test
TEST(float16, isinf) {
float16 a;
a.x = 0x7c00;
float16 b = float16(INFINITY);
// underflow to 0
float16 native_a(5e-40f);
EXPECT_EQ(std::isinf(a), true);
EXPECT_EQ(std::isinf(b), true);
#ifndef _WIN32
// overflow to inf
float16 native_b(5e40f);
EXPECT_EQ(std::isinf(native_b), true);
#endif
EXPECT_EQ(native_a, float16(0));
}
TEST(float16, isnan) {
float16 a;
a.x = 0x7fff;
float16 b = float16(NAN);
float16 c = float16(5e40);
// inf * +-0 will get a nan
float16 d = c * float16(0);
EXPECT_EQ(std::isnan(a), true);
EXPECT_EQ(std::isnan(b), true);
EXPECT_EQ(std::isnan(d), true);
}
TEST(float16, cast) {
float16 a;
a.x = 0x0070;
auto b = a;
{
// change semantic, keep the same value
float16 c = reinterpret_cast<float16&>(reinterpret_cast<unsigned&>(b));
EXPECT_EQ(b, c);
}
{
// use uint32 low 16 bit store float16
uint32_t c = reinterpret_cast<uint32_t&>(b);
float16 d;
d.x = c;
EXPECT_EQ(b, d);
}
}
} // namespace lite
} // namespace paddle
#endif // LITE_CUDA_FP16
|
81c3ebb8fe808bb55af254506743383823e7c69d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_minus_2_right;
int xdim0_update_halo_kernel2_xvel_minus_2_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_minus_2_right;
int ydim0_update_halo_kernel2_xvel_minus_2_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_minus_2_right;
int xdim1_update_halo_kernel2_xvel_minus_2_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_minus_2_right;
int ydim1_update_halo_kernel2_xvel_minus_2_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_minus_2_right * (y) + \
xdim0_update_halo_kernel2_xvel_minus_2_right * \
ydim0_update_halo_kernel2_xvel_minus_2_right * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_minus_2_right * (y) + \
xdim1_update_halo_kernel2_xvel_minus_2_right * \
ydim1_update_halo_kernel2_xvel_minus_2_right * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_minus_2_right_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = -xvel0[OPS_ACC0(-2, 0, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = -xvel1[OPS_ACC1(-2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_minus_2_right(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_minus_2_right +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_minus_2_right *
ydim0_update_halo_kernel2_xvel_minus_2_right;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_minus_2_right +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_minus_2_right *
ydim1_update_halo_kernel2_xvel_minus_2_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_minus_2_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_minus_2_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 76))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(76, "update_halo_kernel2_xvel_minus_2_right");
OPS_kernels[76].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_minus_2_right_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_minus_2_right_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_minus_2_right_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_minus_2_right_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_minus_2_right, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_minus_2_right_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_minus_2_right, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_minus_2_right_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_minus_2_right, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_minus_2_right_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_minus_2_right, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_minus_2_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[76].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_minus_2_right), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[76].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[76].mpi_time += t2 - t1;
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
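// Editor's note: not part of the original file. A short index sketch for the generated code
// above: each thread first offsets arg0/arg1 to its own (idx_x, idx_y, idx_z) cell, after which
// OPS_ACC0(x, y, z) addresses neighbours relative to that cell in the padded dataset.
//
//   base = idx_x + idx_y * xdim0 + idx_z * xdim0 * ydim0;   // the thread's own cell
//   OPS_ACC0(-2, 0, 0) -> base - 2                           // two cells inward in x
//
// which is exactly the mirrored value copied into the halo by
//   xvel0[OPS_ACC0(0,0,0)] = -xvel0[OPS_ACC0(-2,0,0)];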
|
81c3ebb8fe808bb55af254506743383823e7c69d.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_minus_2_right;
int xdim0_update_halo_kernel2_xvel_minus_2_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_minus_2_right;
int ydim0_update_halo_kernel2_xvel_minus_2_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_minus_2_right;
int xdim1_update_halo_kernel2_xvel_minus_2_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_minus_2_right;
int ydim1_update_halo_kernel2_xvel_minus_2_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_minus_2_right * (y) + \
xdim0_update_halo_kernel2_xvel_minus_2_right * \
ydim0_update_halo_kernel2_xvel_minus_2_right * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_minus_2_right * (y) + \
xdim1_update_halo_kernel2_xvel_minus_2_right * \
ydim1_update_halo_kernel2_xvel_minus_2_right * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_minus_2_right_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = -xvel0[OPS_ACC0(-2, 0, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = -xvel1[OPS_ACC1(-2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_minus_2_right(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_minus_2_right +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_minus_2_right *
ydim0_update_halo_kernel2_xvel_minus_2_right;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_minus_2_right +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_minus_2_right *
ydim1_update_halo_kernel2_xvel_minus_2_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_minus_2_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_minus_2_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 76))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(76, "update_halo_kernel2_xvel_minus_2_right");
OPS_kernels[76].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_minus_2_right_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_minus_2_right_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_minus_2_right_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_minus_2_right_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_minus_2_right, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_minus_2_right_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_minus_2_right, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_minus_2_right_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_minus_2_right, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_minus_2_right_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_minus_2_right, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_minus_2_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[76].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_xvel_minus_2_right<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[76].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[76].mpi_time += t2 - t1;
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[76].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
fd21b41f516b56d4cc7da5b0783241eb9e5b1fdd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void updateGradInputVarScaleKernel( float *gradOutputIntData, float *gradInputData, int h, int w, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax) {
const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
if (x < h and y < w) {
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
double outValue = 0;
for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) {
xMinCurr = (int)ceil(-xMax[windowIdx]);
yMinCurr = (int)ceil(-yMax[windowIdx]);
xMaxCurr = (int)floor(-xMin[windowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[windowIdx]) + 1;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
// go to the next channel
gradOutputIntData += (h+1)*(w+1);
}
gradInputData[x*w + y] = outValue;
}
}
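// Editor's note: not part of the original file. The four reads above use the standard
// summed-area-table identity: if I is the prefix sum of the gradient G with (presumably) an
// extra zero row and column (hence the (w+1) pitch and (h+1) rows), then the sum of G over
// rows [t, b) and columns [l, r) is
//
//   sum = I[b][r] - I[t][r] - I[b][l] + I[t][l]
//
// so each output cell accumulates, per window, the gradient mass inside its clamped box
// without looping over the box itself.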
|
fd21b41f516b56d4cc7da5b0783241eb9e5b1fdd.cu
|
#include "includes.h"
__global__ void updateGradInputVarScaleKernel( float *gradOutputIntData, float *gradInputData, int h, int w, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax) {
const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
if (x < h and y < w) {
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
double outValue = 0;
for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) {
xMinCurr = (int)ceil(-xMax[windowIdx]);
yMinCurr = (int)ceil(-yMax[windowIdx]);
xMaxCurr = (int)floor(-xMin[windowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[windowIdx]) + 1;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
// go to the next channel
gradOutputIntData += (h+1)*(w+1);
}
gradInputData[x*w + y] = outValue;
}
}
|
a28ff73992bca258c2b5ca521fa9b4361ae40819.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
dan Sandin 8-22-10
*/
#include "CudaParticle.h"
__constant__ float refldata[REFL_DATA_MUNB][REFL_DATA_ROWS][REFL_DATA_ROW_ELEM];
__constant__ float injdata[INJT_DATA_MUNB][INJT_DATA_ROWS][INJT_DATA_ROW_ELEM];
void setReflData(void * data, int size)
{
hipMemcpyToSymbol(refldata,data,size);
}
void setInjData(void * data, int size)
{
hipMemcpyToSymbol(injdata,data,size);
}
void launchPoint1(float3* pos, float4* color, float * pdata,float * debugData ,unsigned int width,
unsigned int height, int max_age,int disappear_age,float alphaControl, float time, float gravity, float colorFreq, float r3)
{
dim3 block(8,8,1);
dim3 grid(CUDA_MESH_WIDTH / 8, CUDA_MESH_HEIGHT / 8, 1);
hipLaunchKernelGGL(( Point1), dim3(grid), dim3(block), 0, 0, pos,color,pdata,debugData,width,height,max_age,disappear_age,alphaControl,time,gravity,colorFreq,r3);
}
///////////////////////////////////////////////////////////////////////////////
//! Simple particle system
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
__device__ void injector0(unsigned int arrayLoc,unsigned int posLoc,float time,float4* pos, float* pdata){
//ovels
// sin(time) + x index/width, x y are randomly selected because of random nature of age
// x affects angular velocity distribution x,y affects linear velocity distribution
// pdata[arrayLoc+1] = 0.02 * (sin(time/5 + (float)x/(float)width/10.0) * (float)(x * y )/ (float)(width * height)/1.0f ) ;//x velocity sin(time) + x index/width, x is randomly selected because of random nature of age
pdata[arrayLoc+1] = 0.02 * (sin(time/5 + (pdata[arrayLoc+5] + 1)/50) * ( ((pdata[arrayLoc+5] +1)/1 ) * (pdata[arrayLoc+4] + 1.0)/1) ) ;//x velocity sin(time) + x index/width, x is randomly selected because of random nature of age
pdata[arrayLoc+2] = 0;
//ovels
// pdata[arrayLoc+3] = 0.02 * (cos(time/5 + (float)x/(float)width/10.0) * (float)(x *y) / (float)(width * height)/1.0f );// y velocity
pdata[arrayLoc+3] = 0.02 * (cos(time/5 + (pdata[arrayLoc+5] + 1)/50) *( ((pdata[arrayLoc+5] +1)/1 ) * (pdata[arrayLoc+4] + 1.0)/1));// y velocity
// maybe move the generation point around?
{
pos[posLoc].x = 0;
pos[posLoc].y = 0.5;
pos[posLoc].z = 0;
}
}
__device__ float distRnd1( float seed, int iter){
unsigned int rndint1;
rndint1 = (unsigned int)(((seed +1.0)/2.0) *32768) % 32768;
float sum ;
sum =0;
for ( int i = 0;i<iter;i++)
{
rndint1 = ((rndint1 * 1103515245 + 12345)/65536) % 32768;
sum = sum + 0.0002 * (rndint1 % 10000) -1.0;
}
return sum/iter;
}
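// Editor's note (not part of the original file): distRnd1 maps the seed from [-1,1] onto a
// 15-bit integer, advances it with a variant of the classic ANSI-C rand() recurrence
// (x = ((x*1103515245 + 12345)/65536) % 32768), and averages `iter` draws rescaled to roughly
// [-1,1]. Larger `iter` therefore concentrates the result around 0, which is what the
// "centrality" parameters passed in from injdata/refldata control.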
__device__ void injector1(unsigned int arrayLoc,unsigned int posLoc,float time,float4* pos, float* pdata){
float rnd1,rnd2,rnd3;
// not used now
rnd1 = distRnd1(pdata[arrayLoc+4] , 5);
rnd2 = distRnd1(pdata[arrayLoc+5] , 5);
rnd3 = distRnd1(pdata[arrayLoc+6] , 5);
pdata[arrayLoc+1] = 0.02 * (sin(time/5 + (rnd1)/50) * (rnd2 +1)) ;//x velocity
pdata[arrayLoc+2] = 0.002 * rnd3;
pdata[arrayLoc+3] = 0.02 * (cos(time/5 + (rnd1)/50) *(rnd2 +1)); //y velocity
pos[posLoc].x = 0;
//pos[posLoc].y = pdata[7];
pos[posLoc].y = 0;
pos[posLoc].z = 0;
}
__device__ void injector2(unsigned int arrayLoc,unsigned int posLoc,int injNum,float time,float3* pos, float* pdata,float* debugData){
float rnd1,rnd2,rnd3,rnd4,rnd5;
float dt,du,dx,dy,dz,dx2,dy2,dz2,len,vx,vy,vz,dxt,dyt,dzt,dxu,dyu,dzu;
// used now
// float dv
/*
injdata[injNum][1][0]// type, injection ratio i.e. stream volume, ~
injdata[injNum][2][0];//x,y,z position
injdata[injNum][3][0];//x,y,z velocity
injdata[injNum][4][0];//x,y,z size
injdata[injNum][5][0];//t,u,v jitter v not implemented = speed
injdata[injNum][6][0];//speed jitter ~
injdata[injNum][7][0];//centrality of rnd distribution speed dt tu
*/
//if ((pdata[arrayLoc+4] +1) /2 < injdata[injNum][1][1]){ return;}// returns without injection ?????
rnd1 = (distRnd1(pdata[arrayLoc+4] , (int)injdata[injNum][7][0])+1)/2;
rnd2 = (distRnd1(pdata[arrayLoc+5] , (int)injdata[injNum][7][1])+1)/2;
rnd3 = (distRnd1(pdata[arrayLoc+6] , (int)injdata[injNum][7][2])+1)/2;
rnd4 = (distRnd1(pdata[arrayLoc+4],1) );
rnd5 = (distRnd1(pdata[arrayLoc+5],1) );
//float rnd6 = (distRnd1(pdata[arrayLoc+6],1) );
vx = injdata[injNum][3][0];vy = injdata[injNum][3][1];vz = injdata[injNum][3][2];//direction of spray
dt = injdata[injNum][5][0];du = injdata[injNum][5][1];// dv = injdata[injecti +17] * 0;// z component not implemented, jitter relative to direction of spray
// vector vx,vy,vz X 0,1,0
dx = -vz;dy = 0;dz = vx;// dt direction
len = sqrt(dx*dx +dy*dy + dz*dz);
if (len ==0)
{
dx = 0;dy =0;dz =0;
}
else{
dx =dx/len;dy =dy/len;dz =dz/len;
}
//scale by dt amount of jitter in dt direction
dxt = dx *dt;dyt = dy * dt;dzt = dz *dt;
// vector vx,vy,vz X 0,1,0 X vx,vy,vz
dx2 = vy*vx;dy2 = vz*vz-vx*vx;dz2 = vy*vz;// du direction
len = sqrt(dx2*dx2 +dy2*dy2 + dz2*dz2);
if (len ==0)
{
dx2 = 0;dy2 =0;dz2 =0;
}
else{
dx2 =dx2/len;dy2 =dy2/len;dz2 =dz2/len;
}
//scale by du amount of jitter in du direction
dxu = dx2 *du;dyu = dy2 * du;dzu = dz2 *du;
//indices: num injectors =0, position =6, velocity =9, size =12, tuv jitter =15, speed =18, centrality of random
// 3 + speed component velocity t jitter u jitter
if (injdata[injNum][1][0] ==1)
{
pdata[arrayLoc+1] = ( rnd1 * injdata[injNum][6][0]) * (injdata[injNum][3][0] + dxt * rnd2 + dxu * rnd3) ;//x velocity
pdata[arrayLoc+2] = ( rnd1 * injdata[injNum][6][0]) * (injdata[injNum][3][1] + dyt * rnd2+ dyu * rnd3) ; // y velocity
pdata[arrayLoc+3] = ( rnd1 * injdata[injNum][6][0]) * (injdata[injNum][3][2] + dzt * rnd2+ dzu * rnd3); //z velocity
}
if (injdata[injNum][1][0] ==2)
{
pdata[arrayLoc+1] = ( rnd1 * injdata[injNum][6][1]+ injdata[injNum][6][0]) * (injdata[injNum][3][0] + dxt * rnd2 + dxu * rnd3) ;//x velocity
pdata[arrayLoc+2] = ( rnd1 * injdata[injNum][6][1] + injdata[injNum][6][0]) * (injdata[injNum][3][1] + dyt * rnd2+ dyu * rnd3) ; // y velocity
pdata[arrayLoc+3] = ( rnd1 * injdata[injNum][6][1] +injdata[injNum][6][0]) * (injdata[injNum][3][2] + dzt * rnd2+ dzu * rnd3); //z velocity
}
// size computation xform to dt du dv
dt = injdata[injNum][4][0];du = injdata[injNum][4][1];//dv = injdata[injecti +14] * 0;//reuse variables, z component not implemented, jitter relative to direction of spray
dxt = dx *dt;dyt = dy * dt;dzt = dz *dt;
dxu = dx2 *du;dyu = dy2 * du;dzu = dz2 *du;
if (injdata[injNum][1][0] ==1)
{
pos[posLoc].x = injdata[injNum][2][0] + dxt * rnd4 + dxu * rnd5;
pos[posLoc].y = injdata[injNum][2][1] + dyt * rnd4 + dyu * rnd5 ;
pos[posLoc].z = injdata[injNum][2][2] + dzt * rnd4+ dzu * rnd5;
}
if (injdata[injNum][1][0] ==2)
{
pos[posLoc].x = injdata[injNum][2][0] + injdata[injNum][4][0] * distRnd1(pdata[arrayLoc+4] , 3);
pos[posLoc].y = injdata[injNum][2][1] + injdata[injNum][4][1] * distRnd1(pdata[arrayLoc+5] , 3);
pos[posLoc].z = injdata[injNum][2][2] + injdata[injNum][4][2] * distRnd1(pdata[arrayLoc+6] , 3);
}
if (DEBUG == 1)
{
int dbi =0;
debugData[dbi + 0] = (float)injNum ;debugData[dbi + 1] = injdata[injNum][1][1];debugData[dbi + 2] =0;
dbi=dbi+3;
debugData[dbi + 0] = dx;debugData[dbi + 1] = dy;debugData[dbi + 2] = dz;
dbi=dbi+3;
debugData[dbi + 0] = dx2;debugData[dbi + 1] = dy2;debugData[dbi + 2] = dz2;
dbi=dbi+3;
debugData[dbi + 0] = dxt;debugData[dbi + 1] = dyt;debugData[dbi + 2] = dzt;
dbi=dbi+3;
debugData[dbi + 0] = dxu;debugData[dbi + 1] = dyu;debugData[dbi + 2] = dzu;
}
}
///////////////////////////////////////////////////////////////////////
__device__ void planeReflector1(float posX,float posY,float posZ,unsigned int arrayLoc,unsigned int posLoc,int reflNum,float time,float3* pos, float* pdata,float* debugData)
{
float xn =1,yn =1,zn =0, rad =1,damping =.7,noTraping;
float xp,yp,zp;
// used now
//indices: num injectors =0, position =[reflNum][1][0], normal =[reflNum][2][0], size =[reflNum][3][0], tuv jitter = [reflNum ][4][0], damping = [reflNum ][4][0], centrality of random = 21
//data organization: refldata[reflNum][rownum][coordinate number x=0,1=y,2=z]
//data organization: type rownum 0 ~~ ,position 1,normal 2,radius 3,reflection coef 5,
xn = refldata[reflNum][2][0];yn = refldata[reflNum][2][1];zn = refldata[reflNum][2][2];//normal
rad = refldata[reflNum][3][0];
damping = refldata[reflNum][5][0];
noTraping = refldata[reflNum][5][1];
xp = refldata[reflNum][1][0];yp = refldata[reflNum][1][1];zp = refldata[reflNum][1][2];//reflector position
float length = sqrt(xn * xn + yn * yn + zn * zn);
xn = xn/length;
yn = yn/length;
zn = zn/length;
float distx = posX - xp;//point position - reflector position
float disty = posY - yp;
float distz = posZ - zp;
float xv = pdata[arrayLoc+1];float yv = pdata[arrayLoc+2];float zv = pdata[arrayLoc+3];
// if ((fabs(distx) <= rad) && (fabs(disty)<= rad) && (fabs(distz) <= rad))
if ((distx * distx + disty * disty + distz * distz) <= rad * rad)
{
if ((distx * xn + disty * yn + distz * zn) <=0)
{
if ((REFL_HITS == 1) && (noTraping ==1))
{
if(reflNum < 128) debugData[reflNum] = debugData[reflNum] +1;
}
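// specular reflection of the velocity about the plane normal: v' = v - 2*(n.v)*n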
float ndotv = xv * xn + yv * yn + zv * zn;
float newVX =(xv - 2.0*ndotv*xn);
float newVY =(yv - 2.0*ndotv*yn);
float newVZ =(zv - 2.0*ndotv*zn);
// experiments to lower trapping did not work
//damping =1;
//one iteration without damping to prevent capture.
pos[posLoc].x = posX + noTraping * newVX;
pos[posLoc].y = posY + noTraping * newVY;
pos[posLoc].z = posZ + noTraping * newVZ;
pdata[arrayLoc+1] = newVX*damping;
pdata[arrayLoc+2] = newVY*damping;
pdata[arrayLoc+3] = newVZ*damping;
//pdata[arrayLoc] = 0;// temp set age to 0
if ((noTraping ==1)&& (refldata[reflNum][0][1]) == 1 )
{
pdata[arrayLoc] = pdata[arrayLoc]/2.0;
}
}
}
}
__global__ void Point1(float3* pos, float4* color, float * pdata,float * debugData ,unsigned int width,
unsigned int height, int max_age,int disappear_age,float alphaControl, float time, float gravity, float colorFreq, float r3)
{
// used now
// r1,r2,r3 currently not used
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int arrayLoc = y*width*PDATA_ROW_SIZE + x*PDATA_ROW_SIZE;
unsigned int posLoc = y*width+x;
float newX,newY,newZ,posX,posY,posZ;
/*
arrayLoc is the data index of the particle in pdata
pdata[arrayLoc] = age, pdata[arrayLoc+1 +2 +3] = x,y,z velocity, then rnd1 rnd2 rnd3
posLoc is the index of the particle position, and [width*height + posLoc] is the index of its color
pos[posLoc].x .y .z is the x,y,z position
pos[width*height + posLoc].x .y .z is the red, green, blue color
In lines mode the vbo is twice as big, with tail and head positions
*/
if (pdata[arrayLoc] >= max_age)
{
int injecNum = ((arrayLoc/PDATA_ROW_SIZE) % (int) injdata[0][0][0]) +1;// pdata row mod number of injectors
if(( injdata[injecNum][1][1]) ) injector2(arrayLoc,posLoc,injecNum,time,pos,pdata,debugData);
pdata[arrayLoc] = 0;//set age to 0
}
posX=pos[posLoc].x;posY=pos[posLoc].y;posZ=pos[posLoc].z;
// reflector
for (int reflNum = 1;reflNum <= refldata[0][0][0] ;reflNum ++)
{
//planeReflector1( pos[posLoc].x, pos[posLoc].y, pos[posLoc].z,arrayLoc,posLoc,reflNum,time,pos,pdata,debugData);
if (refldata[reflNum][0][0] ==1)planeReflector1(posX,posY,posZ,arrayLoc,posLoc,reflNum,time,pos,pdata,debugData);
}
pdata[arrayLoc] += 1; // increase age
pdata[arrayLoc+2] -= gravity; // gravity
{ // add velocity to position, i.e. integrate, but do not store in pos[] yet
posX=pos[posLoc].x;posY=pos[posLoc].y;posZ=pos[posLoc].z;// plane reflector modifies position info
newX = posX + pdata[arrayLoc+1];
newY = posY + pdata[arrayLoc+2];
newZ = posZ + pdata[arrayLoc+3];
}
{
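// map normalized particle age onto an RGB ramp: cosines at three different frequencies, rescaled to [0,1]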
color[posLoc].y = (cos(colorFreq * 2.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;//green
color[posLoc].x = (cos(colorFreq * 1.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;//red
color[posLoc].z = (cos(colorFreq * 4.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;//blue
float alpha =1;
if ((alphaControl == 1) && (newY <=.1)) alpha =0;
color[posLoc].w = alpha;//alpha
// write output vertex
if (pdata[arrayLoc] > disappear_age){pdata[arrayLoc+1] =10000;pdata[arrayLoc+2] =10000;pdata[arrayLoc+3] =10000;}
pos[posLoc] = make_float3(newX, newY, newZ);
}
}
__global__ void PointSquars(float4* pos, float * pdata, unsigned int width,
unsigned int height, int max_age, float time, float r1, float r2, float r3)
{
//not used now
// r1,r2,r3 currently not used
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int arrayLoc = y*width*4 + x*4;
unsigned int posLoc = y*width+x;
float newX,newY,newZ;
/*
arrayLoc is the data index of the particle in pdata
pdata[arrayLoc] = age, pdata[arrayLoc+1 +2 +3] = x,y,z velocity
posLoc is the index of the particle position, and [width*height + posLoc] is the index of its color
pos[posLoc].x .y .z is the x,y,z position
pos[width*height + posLoc].x .y .z is the red, green, blue color
In lines mode the vbo is twice as big, with tail and head positions
*/
if (pdata[arrayLoc] >= max_age)
{
pdata[arrayLoc] = 0;
pdata[arrayLoc+1] = 0.002 * (sin(time) + (float)x / (float)width/10.0f ) ;//x velocity sin(time) + x index/width, x is randomly selected because of random nature of age
pdata[arrayLoc+2] = 0;
pdata[arrayLoc+3] = 0.002 * (cos(time) + (float)(y) / (float)( height)/10.0f );// y velocity
// maybe move the generation point around?
{
pos[posLoc].x = 0;
pos[posLoc].y = 0.5;
pos[posLoc].z = 0;
}
}
// add velocity to position, i.e. integrate
{
newX = pos[posLoc].x + pdata[arrayLoc+1];
newY = pos[posLoc].y + pdata[arrayLoc+2];
newZ = pos[posLoc].z + pdata[arrayLoc+3];
}
pdata[arrayLoc] += 1; // increase age
pdata[arrayLoc+2] -= 10.1; // gravity
// check against tabletop surface and reverse velocity
{
if ((newY <= 0) && fabs(pos[posLoc].x)<5 && fabs(pos[posLoc].z)<5)
{
//pdata[arrayLoc+2] = -0.7 * pdata[arrayLoc+2];
}
}
// now need to modify the color info in the array
// pos[width*height + posLoc].x = 0.0f;//red
// pos[width*height + posLoc].y = 1.0f;//green
// pos[width*height + posLoc].z = 0.0f;//blue
float colorFreq = 16.0f;
{
pos[width*height + posLoc].y = (cos(colorFreq * 2.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;
pos[width*height + posLoc].x = (cos(colorFreq * 1.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;
pos[width*height + posLoc].z = (cos(colorFreq * 4.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;
// write output vertex
pos[posLoc] = make_float4(newX, newY, newZ, 1.0f);
}
}
|
a28ff73992bca258c2b5ca521fa9b4361ae40819.cu
|
/*
dan Sandin 8-22-10
*/
#include "CudaParticle.h"
__constant__ float refldata[REFL_DATA_MUNB][REFL_DATA_ROWS][REFL_DATA_ROW_ELEM];
__constant__ float injdata[INJT_DATA_MUNB][INJT_DATA_ROWS][INJT_DATA_ROW_ELEM];
void setReflData(void * data, int size)
{
cudaMemcpyToSymbol(refldata,data,size);
}
void setInjData(void * data, int size)
{
cudaMemcpyToSymbol(injdata,data,size);
}
void launchPoint1(float3* pos, float4* color, float * pdata,float * debugData ,unsigned int width,
unsigned int height, int max_age,int disappear_age,float alphaControl, float time, float gravity, float colorFreq, float r3)
{
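// one thread per particle: 8x8 thread blocks tiling the CUDA_MESH_WIDTH x CUDA_MESH_HEIGHT particle grid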
dim3 block(8,8,1);
dim3 grid(CUDA_MESH_WIDTH / 8, CUDA_MESH_HEIGHT / 8, 1);
Point1<<< grid, block>>>(pos,color,pdata,debugData,width,height,max_age,disappear_age,alphaControl,time,gravity,colorFreq,r3);
}
///////////////////////////////////////////////////////////////////////////////
//! Simple partical system
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
__device__ void injector0(unsigned int arrayLoc,unsigned int posLoc,float time,float4* pos, float* pdata){
//ovals
// sin(time) + x index/width; x and y are effectively random because of the random nature of age
// x affects the angular velocity distribution, x,y affect the linear velocity distribution
// pdata[arrayLoc+1] = 0.02 * (sin(time/5 + (float)x/(float)width/10.0) * (float)(x * y )/ (float)(width * height)/1.0f ) ;//x velocity sin(time) + x index/width, x is randomly selected because of randon nature of age
pdata[arrayLoc+1] = 0.02 * (sin(time/5 + (pdata[arrayLoc+5] + 1)/50) * ( ((pdata[arrayLoc+5] +1)/1 ) * (pdata[arrayLoc+4] + 1.0)/1) ) ;//x velocity sin(time) + x index/width, x is randomly selected because of randon nature of age
pdata[arrayLoc+2] = 0;
//ovals
// pdata[arrayLoc+3] = 0.02 * (cos(time/5 + (float)x/(float)width/10.0) * (float)(x *y) / (float)(width * height)/1.0f );// y velocity
pdata[arrayLoc+3] = 0.02 * (cos(time/5 + (pdata[arrayLoc+5] + 1)/50) *( ((pdata[arrayLoc+5] +1)/1 ) * (pdata[arrayLoc+4] + 1.0)/1));// y velocity
// maybe move the generation point around?
{
pos[posLoc].x = 0;
pos[posLoc].y = 0.5;
pos[posLoc].z = 0;
}
}
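// distRnd1: a small linear congruential generator seeded from a float expected in [-1,1];
// each draw lands roughly in [-1,1], and averaging 'iter' draws pulls the result toward 0,
// so a larger 'iter' gives a more centered (less uniform) distribution.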
__device__ float distRnd1( float seed, int iter){
unsigned int rndint1;
rndint1 = (unsigned int)(((seed +1.0)/2.0) *32768) % 32768;
float sum ;
sum =0;
for ( int i = 0;i<iter;i++)
{
rndint1 = ((rndint1 * 1103515245 + 12345)/65536) % 32768;
sum = sum + 0.0002 * (rndint1 % 10000) -1.0;
}
return sum/iter;
}
__device__ void injector1(unsigned int arrayLoc,unsigned int posLoc,float time,float4* pos, float* pdata){
float rnd1,rnd2,rnd3;
// not used now
rnd1 = distRnd1(pdata[arrayLoc+4] , 5);
rnd2 = distRnd1(pdata[arrayLoc+5] , 5);
rnd3 = distRnd1(pdata[arrayLoc+6] , 5);
pdata[arrayLoc+1] = 0.02 * (sin(time/5 + (rnd1)/50) * (rnd2 +1)) ;//x velocity
pdata[arrayLoc+2] = 0.002 * rnd3;
pdata[arrayLoc+3] = 0.02 * (cos(time/5 + (rnd1)/50) *(rnd2 +1)); //y velocity
pos[posLoc].x = 0;
//pos[posLoc].y = pdata[7];
pos[posLoc].y = 0;
pos[posLoc].z = 0;
}
__device__ void injector2(unsigned int arrayLoc,unsigned int posLoc,int injNum,float time,float3* pos, float* pdata,float* debugData){
float rnd1,rnd2,rnd3,rnd4,rnd5;
float dt,du,dx,dy,dz,dx2,dy2,dz2,len,vx,vy,vz,dxt,dyt,dzt,dxu,dyu,dzu;
// used now
// float dv
/*
injdata[injNum][1][0]// type, injection ratio i.e. stream volume, ~
injdata[injNum][2][0];//x,y,z position
injdata[injNum][3][0];//x,y,z velocity
injdata[injNum][4][0];//x,y,z size
injdata[injNum][5][0];//t,u,v jitter, v not implemented = speed
injdata[injNum][6][0];//speed jitter ~
injdata[injNum][7][0];//centrality of rnd distribution: speed, dt, tu
*/
//if ((pdata[arrayLoc+4] +1) /2 < injdata[injNum][1][1]){ return;}// returns without injection ?????
rnd1 = (distRnd1(pdata[arrayLoc+4] , (int)injdata[injNum][7][0])+1)/2;
rnd2 = (distRnd1(pdata[arrayLoc+5] , (int)injdata[injNum][7][1])+1)/2;
rnd3 = (distRnd1(pdata[arrayLoc+6] , (int)injdata[injNum][7][2])+1)/2;
rnd4 = (distRnd1(pdata[arrayLoc+4],1) );
rnd5 = (distRnd1(pdata[arrayLoc+5],1) );
//float rnd6 = (distRnd1(pdata[arrayLoc+6],1) );
vx = injdata[injNum][3][0];vy = injdata[injNum][3][1];vz = injdata[injNum][3][2];//direction of spray
dt = injdata[injNum][5][0];du = injdata[injNum][5][1];// dv = injdata[injecti +17] * 0;// z component not implemented; jitter relative to direction of spray
// vector vx,vy,vz X 0,1,0
dx = -vz;dy = 0;dz = vx;// dt directon
len = sqrt(dx*dx +dy*dy + dz*dz);
if (len ==0)
{
dx = 0;dy =0;dz =0;
}
else{
dx =dx/len;dy =dy/len;dz =dz/len;
}
//scale by dt: amount of jitter in dt direction
dxt = dx *dt;dyt = dy * dt;dzt = dz *dt;
// vector vx,vy,vz X 0,1,0 X vx,vy,vz
dx2 = vy*vx;dy2 = vz*vz-vx*vx;dz2 = vy*vz;// du direction
len = sqrt(dx2*dx2 +dy2*dy2 + dz2*dz2);
if (len ==0)
{
dx2 = 0;dy2 =0;dz2 =0;
}
else{
dx2 =dx2/len;dy2 =dy2/len;dz2 =dz2/len;
}
//scale by du: amount of jitter in du direction
dxu = dx2 *du;dyu = dy2 * du;dzu = dz2 *du;
//indices: num injectors =0, position =6, velocity =9, size =12, tuv jitter = 15, speed = 18, centrality of random
// 3 + speed component velocity t jitter u jitter
if (injdata[injNum][1][0] ==1)
{
pdata[arrayLoc+1] = ( rnd1 * injdata[injNum][6][0]) * (injdata[injNum][3][0] + dxt * rnd2 + dxu * rnd3) ;//x velocity
pdata[arrayLoc+2] = ( rnd1 * injdata[injNum][6][0]) * (injdata[injNum][3][1] + dyt * rnd2+ dyu * rnd3) ; // y velocity
pdata[arrayLoc+3] = ( rnd1 * injdata[injNum][6][0]) * (injdata[injNum][3][2] + dzt * rnd2+ dzu * rnd3); //z velocity
}
if (injdata[injNum][1][0] ==2)
{
pdata[arrayLoc+1] = ( rnd1 * injdata[injNum][6][1]+ injdata[injNum][6][0]) * (injdata[injNum][3][0] + dxt * rnd2 + dxu * rnd3) ;//x velocity
pdata[arrayLoc+2] = ( rnd1 * injdata[injNum][6][1] + injdata[injNum][6][0]) * (injdata[injNum][3][1] + dyt * rnd2+ dyu * rnd3) ; // y velocity
pdata[arrayLoc+3] = ( rnd1 * injdata[injNum][6][1] +injdata[injNum][6][0]) * (injdata[injNum][3][2] + dzt * rnd2+ dzu * rnd3); //z velocity
}
// size computation xform to dt du dv
dt = injdata[injNum][4][0];du = injdata[injNum][4][1];//dv = injdata[injecti +14] * 0;//re-use variables; z component not implemented; jitter relative to direction of spray
dxt = dx *dt;dyt = dy * dt;dzt = dz *dt;
dxu = dx2 *du;dyu = dy2 * du;dzu = dz2 *du;
if (injdata[injNum][1][0] ==1)
{
pos[posLoc].x = injdata[injNum][2][0] + dxt * rnd4 + dxu * rnd5;
pos[posLoc].y = injdata[injNum][2][1] + dyt * rnd4 + dyu * rnd5 ;
pos[posLoc].z = injdata[injNum][2][2] + dzt * rnd4+ dzu * rnd5;
}
if (injdata[injNum][1][0] ==2)
{
pos[posLoc].x = injdata[injNum][2][0] + injdata[injNum][4][0] * distRnd1(pdata[arrayLoc+4] , 3);
pos[posLoc].y = injdata[injNum][2][1] + injdata[injNum][4][1] * distRnd1(pdata[arrayLoc+5] , 3);
pos[posLoc].z = injdata[injNum][2][2] + injdata[injNum][4][2] * distRnd1(pdata[arrayLoc+6] , 3);
}
if (DEBUG == 1)
{
int dbi =0;
debugData[dbi + 0] = (float)injNum ;debugData[dbi + 1] = injdata[injNum][1][1];debugData[dbi + 2] =0;
dbi=dbi+3;
debugData[dbi + 0] = dx;debugData[dbi + 1] = dy;debugData[dbi + 2] = dz;
dbi=dbi+3;
debugData[dbi + 0] = dx2;debugData[dbi + 1] = dy2;debugData[dbi + 2] = dz2;
dbi=dbi+3;
debugData[dbi + 0] = dxt;debugData[dbi + 1] = dyt;debugData[dbi + 2] = dzt;
dbi=dbi+3;
debugData[dbi + 0] = dxu;debugData[dbi + 1] = dyu;debugData[dbi + 2] = dzu;
}
}
///////////////////////////////////////////////////////////////////////
__device__ void planeReflector1(float posX,float posY,float posZ,unsigned int arrayLoc,unsigned int posLoc,int reflNum,float time,float3* pos, float* pdata,float* debugData)
{
float xn =1,yn =1,zn =0, rad =1,damping =.7,noTraping;
float xp,yp,zp;
// used now
//indices: num reflectors =0, position =[reflNum][1][0], normal =[reflNum][2][0], size =[reflNum][3][0], tuv jitter = [reflNum][4][0], damping = [reflNum][4][0], centrality of random = 21
//data organization: refldata[reflNum][rownum][coordinate number x=0, y=1, z=2]
//data organization by rownum: type 0 ~~, position 1, normal 2, radius 3, reflection coef 5,
xn = refldata[reflNum][2][0];yn = refldata[reflNum][2][1];zn = refldata[reflNum][2][2];//normal
rad = refldata[reflNum][3][0];
damping = refldata[reflNum][5][0];
noTraping = refldata[reflNum][5][1];
xp = refldata[reflNum][1][0];yp = refldata[reflNum][1][1];zp = refldata[reflNum][1][2];//reflector position
float length = sqrt(xn * xn + yn * yn + zn * zn);
xn = xn/length;
yn = yn/length;
zn = zn/length;
float distx = posX - xp;//point position - reflector position
float disty = posY - yp;
float distz = posZ - zp;
float xv = pdata[arrayLoc+1];float yv = pdata[arrayLoc+2];float zv = pdata[arrayLoc+3];
// if ((fabs(distx) <= rad) && (fabs(disty)<= rad) && (fabs(distz) <= rad))
if ((distx * distx + disty * disty + distz * distz) <= rad * rad)
{
if ((distx * xn + disty * yn + distz * zn) <=0)
{
if ((REFL_HITS == 1) && (noTraping ==1))
{
if(reflNum < 128) debugData[reflNum] = debugData[reflNum] +1;
}
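// specular reflection of the velocity about the plane normal: v' = v - 2*(n.v)*n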
float ndotv = xv * xn + yv * yn + zv * zn;
float newVX =(xv - 2.0*ndotv*xn);
float newVY =(yv - 2.0*ndotv*yn);
float newVZ =(zv - 2.0*ndotv*zn);
// experiments to lower trapping did not work
//damping =1;
//one iteration without damping to prevent capture.
pos[posLoc].x = posX + noTraping * newVX;
pos[posLoc].y = posY + noTraping * newVY;
pos[posLoc].z = posZ + noTraping * newVZ;
pdata[arrayLoc+1] = newVX*damping;
pdata[arrayLoc+2] = newVY*damping;
pdata[arrayLoc+3] = newVZ*damping;
//pdata[arrayLoc] = 0;// temp set age to 0
if ((noTraping ==1)&& (refldata[reflNum][0][1]) == 1 )
{
pdata[arrayLoc] = pdata[arrayLoc]/2.0;
}
}
}
}
__global__ void Point1(float3* pos, float4* color, float * pdata,float * debugData ,unsigned int width,
unsigned int height, int max_age,int disappear_age,float alphaControl, float time, float gravity, float colorFreq, float r3)
{
// used now
// r1,r2,r3 currently not used
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int arrayLoc = y*width*PDATA_ROW_SIZE + x*PDATA_ROW_SIZE;
unsigned int posLoc = y*width+x;
float newX,newY,newZ,posX,posY,posZ;
/*
arrayLoc is the data index of the particle in pdata
pdata[arrayLoc] = age, pdata[arrayLoc+1 +2 +3] = x,y,z velocity, then rnd1 rnd2 rnd3
posLoc is the index of the particle position, and [width*height + posLoc] is the index of its color
pos[posLoc].x .y .z is the x,y,z position
pos[width*height + posLoc].x .y .z is the red, green, blue color
In lines mode the vbo is twice as big, with tail and head positions
*/
if (pdata[arrayLoc] >= max_age)
{
int injecNum = ((arrayLoc/PDATA_ROW_SIZE) % (int) injdata[0][0][0]) +1;// pdata row mod number of injectors
if(( injdata[injecNum][1][1]) ) injector2(arrayLoc,posLoc,injecNum,time,pos,pdata,debugData);
pdata[arrayLoc] = 0;//set age to 0
}
posX=pos[posLoc].x;posY=pos[posLoc].y;posZ=pos[posLoc].z;
// reflector
for (int reflNum = 1;reflNum <= refldata[0][0][0] ;reflNum ++)
{
//planeReflector1( pos[posLoc].x, pos[posLoc].y, pos[posLoc].z,arrayLoc,posLoc,reflNum,time,pos,pdata,debugData);
if (refldata[reflNum][0][0] ==1)planeReflector1(posX,posY,posZ,arrayLoc,posLoc,reflNum,time,pos,pdata,debugData);
}
pdata[arrayLoc] += 1; // increase age
pdata[arrayLoc+2] -= gravity; // gravity
{ // add velocity to position, i.e. integrate, but do not store in pos[] yet
posX=pos[posLoc].x;posY=pos[posLoc].y;posZ=pos[posLoc].z;// plane reflector modifies position info
newX = posX + pdata[arrayLoc+1];
newY = posY + pdata[arrayLoc+2];
newZ = posZ + pdata[arrayLoc+3];
}
{
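// map normalized particle age onto an RGB ramp: cosines at three different frequencies, rescaled to [0,1]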
color[posLoc].y = (cos(colorFreq * 2.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;//green
color[posLoc].x = (cos(colorFreq * 1.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;//red
color[posLoc].z = (cos(colorFreq * 4.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;//blue
float alpha =1;
if ((alphaControl == 1) && (newY <=.1)) alpha =0;
color[posLoc].w = alpha;//alpha
// write output vertex
if (pdata[arrayLoc] > disappear_age){pdata[arrayLoc+1] =10000;pdata[arrayLoc+2] =10000;pdata[arrayLoc+3] =10000;}
pos[posLoc] = make_float3(newX, newY, newZ);
}
}
__global__ void PointSquars(float4* pos, float * pdata, unsigned int width,
unsigned int height, int max_age, float time, float r1, float r2, float r3)
{
//not used now
// r1,r2,r3 currently not used
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int arrayLoc = y*width*4 + x*4;
unsigned int posLoc = y*width+x;
float newX,newY,newZ;
/*
arrayLoc is the data index of the particle in pdata
pdata[arrayLoc] = age, pdata[arrayLoc+1 +2 +3] = x,y,z velocity
posLoc is the index of the particle position, and [width*height + posLoc] is the index of its color
pos[posLoc].x .y .z is the x,y,z position
pos[width*height + posLoc].x .y .z is the red, green, blue color
In lines mode the vbo is twice as big, with tail and head positions
*/
if (pdata[arrayLoc] >= max_age)
{
pdata[arrayLoc] = 0;
pdata[arrayLoc+1] = 0.002 * (sin(time) + (float)x / (float)width/10.0f ) ;//x velocity sin(time) + x index/width, x is randomly selected because of random nature of age
pdata[arrayLoc+2] = 0;
pdata[arrayLoc+3] = 0.002 * (cos(time) + (float)(y) / (float)( height)/10.0f );// y velocity
// maybe move the generation point around?
{
pos[posLoc].x = 0;
pos[posLoc].y = 0.5;
pos[posLoc].z = 0;
}
}
// add velocity to position, i.e. integrate
{
newX = pos[posLoc].x + pdata[arrayLoc+1];
newY = pos[posLoc].y + pdata[arrayLoc+2];
newZ = pos[posLoc].z + pdata[arrayLoc+3];
}
pdata[arrayLoc] += 1; // increase age
pdata[arrayLoc+2] -= 10.1; // gravity
// check against tabletop surface and reverse velocity
{
if ((newY <= 0) && fabs(pos[posLoc].x)<5 && fabs(pos[posLoc].z)<5)
{
//pdata[arrayLoc+2] = -0.7 * pdata[arrayLoc+2];
}
}
// now need to modify the color info in the array
// pos[width*height + posLoc].x = 0.0f;//red
// pos[width*height + posLoc].y = 1.0f;//green
// pos[width*height + posLoc].z = 0.0f;//blue
float colorFreq = 16.0f;
{
pos[width*height + posLoc].y = (cos(colorFreq * 2.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;
pos[width*height + posLoc].x = (cos(colorFreq * 1.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;
pos[width*height + posLoc].z = (cos(colorFreq * 4.0 * pdata[arrayLoc]/max_age))/2.0f + 0.5f ;
// write output vertex
pos[posLoc] = make_float4(newX, newY, newZ, 1.0f);
}
}
|
766584e0e66de1c2788b4514d2ffa15790ab1867.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "hip/hip_runtime.h"
#include "utility/src/utils.cuh"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void computeLogSumExpWirelength(
const T *x, const T *y,
const int *flat_netpin,
const int *netpin_start,
const unsigned char *net_mask,
int num_nets,
const T* gamma,
const T *inv_gamma,
T *partial_wl,
T *grad_intermediate_x, T *grad_intermediate_y
)
{
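// Threads come in pairs per net: even threads process the x coordinates and odd threads
// the y coordinates of net ii = i >> 1. The wirelength below is the log-sum-exp smooth
// approximation of half-perimeter wirelength:
//   WL ~= gamma*log(sum exp(x/gamma)) + gamma*log(sum exp(-x/gamma))
// x_max and x_min are factored out of the exponents for numerical stability, and the
// per-pin derivative of the same expression is written to grad_intermediate_x/y.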
int i = blockIdx.x * blockDim.x + threadIdx.x;
int ii = i >> 1;
if (ii < num_nets && net_mask[ii])
{
const T *values;
T *grads;
if (i & 1)
{
values = y;
grads = grad_intermediate_y;
}
else
{
values = x;
grads = grad_intermediate_x;
}
// int degree = netpin_start[ii+1]-netpin_start[ii];
T x_max = -FLT_MAX;
T x_min = FLT_MAX;
for (int j = netpin_start[ii]; j < netpin_start[ii + 1]; ++j)
{
T xx = values[flat_netpin[j]];
x_max = max(xx, x_max);
x_min = min(xx, x_min);
}
T exp_x_sum = 0;
T exp_nx_sum = 0;
for (int j = netpin_start[ii]; j < netpin_start[ii + 1]; ++j)
{
T xx = values[flat_netpin[j]];
T exp_x = exp((xx - x_max) * (*inv_gamma));
T exp_nx = exp((x_min - xx) * (*inv_gamma));
exp_x_sum += exp_x;
exp_nx_sum += exp_nx;
}
partial_wl[i] = (log(exp_x_sum) + log(exp_nx_sum)) * (*gamma) + x_max - x_min;
T reciprocal_exp_x_sum = 1.0 / exp_x_sum;
T reciprocal_exp_nx_sum = 1.0 / exp_nx_sum;
for (int j = netpin_start[ii]; j < netpin_start[ii+1]; ++j)
{
int jj = flat_netpin[j];
T xx = values[jj];
T exp_x = exp((xx - x_max) * (*inv_gamma));
T exp_nx = exp((x_min - xx) * (*inv_gamma));
grads[jj] = (exp_x*reciprocal_exp_x_sum - exp_nx*reciprocal_exp_nx_sum);
}
}
}
template <typename T>
int computeLogSumExpWirelengthCudaLauncher(
const T* x, const T* y,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
const T* inv_gamma,
T* partial_wl,
T* grad_intermediate_x, T* grad_intermediate_y
)
{
int thread_count = 64;
int block_count = (num_nets * 2 + thread_count - 1) / thread_count; // separate x and y
hipLaunchKernelGGL(( computeLogSumExpWirelength), dim3(block_count), dim3(thread_count), 0, 0,
x, y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
inv_gamma,
partial_wl,
grad_intermediate_x, grad_intermediate_y
);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
template int computeLogSumExpWirelengthCudaLauncher<T>(\
const T* x, const T* y, \
const int* flat_netpin, \
const int* netpin_start, \
const unsigned char* net_mask, \
int num_nets, \
const T* gamma, \
const T* inv_gamma, \
T* partial_wl, \
T* grad_intermediate_x, T* grad_intermediate_y \
);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
766584e0e66de1c2788b4514d2ffa15790ab1867.cu
|
#include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "cuda_runtime.h"
#include "utility/src/utils.cuh"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void computeLogSumExpWirelength(
const T *x, const T *y,
const int *flat_netpin,
const int *netpin_start,
const unsigned char *net_mask,
int num_nets,
const T* gamma,
const T *inv_gamma,
T *partial_wl,
T *grad_intermediate_x, T *grad_intermediate_y
)
{
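// Threads come in pairs per net: even threads process the x coordinates and odd threads
// the y coordinates of net ii = i >> 1. The wirelength below is the log-sum-exp smooth
// approximation of half-perimeter wirelength:
//   WL ~= gamma*log(sum exp(x/gamma)) + gamma*log(sum exp(-x/gamma))
// x_max and x_min are factored out of the exponents for numerical stability, and the
// per-pin derivative of the same expression is written to grad_intermediate_x/y.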
int i = blockIdx.x * blockDim.x + threadIdx.x;
int ii = i >> 1;
if (ii < num_nets && net_mask[ii])
{
const T *values;
T *grads;
if (i & 1)
{
values = y;
grads = grad_intermediate_y;
}
else
{
values = x;
grads = grad_intermediate_x;
}
// int degree = netpin_start[ii+1]-netpin_start[ii];
T x_max = -FLT_MAX;
T x_min = FLT_MAX;
for (int j = netpin_start[ii]; j < netpin_start[ii + 1]; ++j)
{
T xx = values[flat_netpin[j]];
x_max = max(xx, x_max);
x_min = min(xx, x_min);
}
T exp_x_sum = 0;
T exp_nx_sum = 0;
for (int j = netpin_start[ii]; j < netpin_start[ii + 1]; ++j)
{
T xx = values[flat_netpin[j]];
T exp_x = exp((xx - x_max) * (*inv_gamma));
T exp_nx = exp((x_min - xx) * (*inv_gamma));
exp_x_sum += exp_x;
exp_nx_sum += exp_nx;
}
partial_wl[i] = (log(exp_x_sum) + log(exp_nx_sum)) * (*gamma) + x_max - x_min;
T reciprocal_exp_x_sum = 1.0 / exp_x_sum;
T reciprocal_exp_nx_sum = 1.0 / exp_nx_sum;
for (int j = netpin_start[ii]; j < netpin_start[ii+1]; ++j)
{
int jj = flat_netpin[j];
T xx = values[jj];
T exp_x = exp((xx - x_max) * (*inv_gamma));
T exp_nx = exp((x_min - xx) * (*inv_gamma));
grads[jj] = (exp_x*reciprocal_exp_x_sum - exp_nx*reciprocal_exp_nx_sum);
}
}
}
template <typename T>
int computeLogSumExpWirelengthCudaLauncher(
const T* x, const T* y,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
const T* inv_gamma,
T* partial_wl,
T* grad_intermediate_x, T* grad_intermediate_y
)
{
int thread_count = 64;
int block_count = (num_nets * 2 + thread_count - 1) / thread_count; // separate x and y
computeLogSumExpWirelength<<<block_count, thread_count>>>(
x, y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
inv_gamma,
partial_wl,
grad_intermediate_x, grad_intermediate_y
);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
template int computeLogSumExpWirelengthCudaLauncher<T>(\
const T* x, const T* y, \
const int* flat_netpin, \
const int* netpin_start, \
const unsigned char* net_mask, \
int num_nets, \
const T* gamma, \
const T* inv_gamma, \
T* partial_wl, \
T* grad_intermediate_x, T* grad_intermediate_y \
);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
144707a5403cd7f23226bec9eb7b59f9d2300bd1.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "./polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size. */
#define NX 16384
#define NY 16384
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
/* Pipeline */
#define NUM_CHUNK 16
#define CHUNK_SIZE NX/NUM_CHUNK
#define NUM_STREAMS 6
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *x, DATA_TYPE *A)
{
int i, j;
for (i = 0; i < NX; i++)
{
x[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
}
}
}
void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu)
{
int i, fail;
fail = 0;
for (i=0; i<NY; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
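// atax computes y = A^T * (A * x): kernel1 forms tmp = A*x (one thread per row of A) and
// kernel2 accumulates y = A^T*tmp (one thread per column). The *_chunk variants work on
// CHUNK_SIZE rows of A at a time so copies and kernels can be pipelined across streams.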
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NX)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i] += A[i * NY + j] * x[j];
}
}
}
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
int i;
for(i=0; i < NX; i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
__global__ void atax_kernel1_chunk(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp, int chunk)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int offset = CHUNK_SIZE*chunk;
if ( i < CHUNK_SIZE)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i+offset] += A[(i+offset) * NY + j] * x[j];
}
}
}
__global__ void atax_kernel2_chunk(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp, int chunk)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
int i;
for(i= CHUNK_SIZE*chunk; i < CHUNK_SIZE*(chunk+1); i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp)
{
int i,j;
for (i= 0; i < NY; i++)
{
y[i] = 0;
}
for (i = 0; i < NX; i++)
{
tmp[i] = 0;
for (j = 0; j < NY; j++)
{
tmp[i] = tmp[i] + A[i*NY + j] * x[j];
}
for (j = 0; j < NY; j++)
{
y[j] = y[j] + A[i*NY + j] * tmp[i];
}
}
}
void ataxGpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu)
{
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
hipMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);
hipEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipStream_t streams[NUM_STREAMS];
for (int i=0; i< NUM_STREAMS; i++)
hipStreamCreate(&(streams[i]));
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)CHUNK_SIZE) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
hipEventRecord(start);
hipMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, hipMemcpyHostToDevice);
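// Pipelined execution: A is split into NUM_CHUNK row blocks issued round-robin over
// NUM_STREAMS streams, so the host-to-device copy of one chunk (from pinned host memory)
// can overlap with the kernels of previously issued chunks.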
for (int i = 0 ; i < NUM_CHUNK; i++){
hipMemcpyAsync(A_gpu+i*CHUNK_SIZE*NY, A+i*CHUNK_SIZE*NY, sizeof(DATA_TYPE) * NY * CHUNK_SIZE, hipMemcpyHostToDevice, streams[i % NUM_STREAMS]);
hipLaunchKernelGGL(( atax_kernel1_chunk), dim3(grid1), dim3(block),0,streams[i % NUM_STREAMS] , A_gpu,x_gpu,tmp_gpu,i);
hipLaunchKernelGGL(( atax_kernel2_chunk), dim3(grid2), dim3(block),0,streams[i % NUM_STREAMS] , A_gpu,y_gpu,tmp_gpu,i);
}
hipDeviceSynchronize();
hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
hipFree(A_gpu);
hipFree(x_gpu);
hipFree(y_gpu);
hipFree(tmp_gpu);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* x;
DATA_TYPE* y;
DATA_TYPE* y_outputFromGpu;
DATA_TYPE* tmp;
/*
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
*/
hipHostMalloc((void **)&A, sizeof(DATA_TYPE) * NX * NY, hipHostMallocPortable);
hipHostMalloc((void **)&x, sizeof(DATA_TYPE) * NY, hipHostMallocPortable);
hipHostMalloc((void **)&y, sizeof(DATA_TYPE) * NY, hipHostMallocPortable);
hipHostMalloc((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY, hipHostMallocPortable);
hipHostMalloc((void **)&tmp, sizeof(DATA_TYPE) * NX, hipHostMallocPortable);
init_array(x, A);
GPU_argv_init();
ataxGpu(A, x, y, tmp, y_outputFromGpu);
t_start = rtclock();
atax_cpu(A, x, y, tmp);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(y, y_outputFromGpu);
hipFree(A);
hipFree(x);
hipFree(y);
hipFree(y_outputFromGpu);
hipFree(tmp);
return 0;
}
|
144707a5403cd7f23226bec9eb7b59f9d2300bd1.cu
|
/**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "./polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size. */
#define NX 16384
#define NY 16384
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
/* Pipeline */
#define NUM_CHUNK 16
#define CHUNK_SIZE NX/NUM_CHUNK
#define NUM_STREAMS 6
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *x, DATA_TYPE *A)
{
int i, j;
for (i = 0; i < NX; i++)
{
x[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
}
}
}
void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu)
{
int i, fail;
fail = 0;
for (i=0; i<NY; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
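// atax computes y = A^T * (A * x): kernel1 forms tmp = A*x (one thread per row of A) and
// kernel2 accumulates y = A^T*tmp (one thread per column). The *_chunk variants work on
// CHUNK_SIZE rows of A at a time so copies and kernels can be pipelined across streams.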
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NX)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i] += A[i * NY + j] * x[j];
}
}
}
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
int i;
for(i=0; i < NX; i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
__global__ void atax_kernel1_chunk(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp, int chunk)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int offset = CHUNK_SIZE*chunk;
if ( i < CHUNK_SIZE)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i+offset] += A[(i+offset) * NY + j] * x[j];
}
}
}
__global__ void atax_kernel2_chunk(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp, int chunk)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
int i;
for(i= CHUNK_SIZE*chunk; i < CHUNK_SIZE*(chunk+1); i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp)
{
int i,j;
for (i= 0; i < NY; i++)
{
y[i] = 0;
}
for (i = 0; i < NX; i++)
{
tmp[i] = 0;
for (j = 0; j < NY; j++)
{
tmp[i] = tmp[i] + A[i*NY + j] * x[j];
}
for (j = 0; j < NY; j++)
{
y[j] = y[j] + A[i*NY + j] * tmp[i];
}
}
}
void ataxGpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu)
{
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);
cudaEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStream_t streams[NUM_STREAMS];
for (int i=0; i< NUM_STREAMS; i++)
cudaStreamCreate(&(streams[i]));
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)CHUNK_SIZE) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
cudaEventRecord(start);
cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);
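// Pipelined execution: A is split into NUM_CHUNK row blocks issued round-robin over
// NUM_STREAMS streams, so the host-to-device copy of one chunk (from pinned host memory)
// can overlap with the kernels of previously issued chunks.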
for (int i = 0 ; i < NUM_CHUNK; i++){
cudaMemcpyAsync(A_gpu+i*CHUNK_SIZE*NY, A+i*CHUNK_SIZE*NY, sizeof(DATA_TYPE) * NY * CHUNK_SIZE, cudaMemcpyHostToDevice, streams[i % NUM_STREAMS]);
atax_kernel1_chunk<<< grid1, block,0,streams[i % NUM_STREAMS] >>>(A_gpu,x_gpu,tmp_gpu,i);
atax_kernel2_chunk<<< grid2, block,0,streams[i % NUM_STREAMS] >>>(A_gpu,y_gpu,tmp_gpu,i);
}
cudaDeviceSynchronize();
cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
cudaFree(A_gpu);
cudaFree(x_gpu);
cudaFree(y_gpu);
cudaFree(tmp_gpu);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* x;
DATA_TYPE* y;
DATA_TYPE* y_outputFromGpu;
DATA_TYPE* tmp;
/*
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
*/
cudaHostAlloc((void **)&A, sizeof(DATA_TYPE) * NX * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&x, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&y, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&tmp, sizeof(DATA_TYPE) * NX, cudaHostAllocPortable);
init_array(x, A);
GPU_argv_init();
ataxGpu(A, x, y, tmp, y_outputFromGpu);
t_start = rtclock();
atax_cpu(A, x, y, tmp);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(y, y_outputFromGpu);
cudaFree(A);
cudaFree(x);
cudaFree(y);
cudaFree(y_outputFromGpu);
cudaFree(tmp);
return 0;
}
|
ae876ae62f17fc97ff10ac8c02da8f6dcf19e8d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void markSegments( unsigned short * d_mark, unsigned int circuitGraphEdgeCount, unsigned int * d_cg_edge_start, unsigned int * d_cedgeCount, unsigned int circuitVertexSize){
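// Flatten the 2D grid of 2D blocks into one linear thread id; each thread handles one
// circuit vertex and writes that vertex's edge count (d_cedgeCount) at the first edge
// position of its segment (d_cg_edge_start).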
unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x;
if(tid<circuitVertexSize){
d_mark[ d_cg_edge_start[tid]]=d_cedgeCount[tid];
}
}
|
ae876ae62f17fc97ff10ac8c02da8f6dcf19e8d6.cu
|
#include "includes.h"
__global__ void markSegments( unsigned short * d_mark, unsigned int circuitGraphEdgeCount, unsigned int * d_cg_edge_start, unsigned int * d_cedgeCount, unsigned int circuitVertexSize){
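// Flatten the 2D grid of 2D blocks into one linear thread id; each thread handles one
// circuit vertex and writes that vertex's edge count (d_cedgeCount) at the first edge
// position of its segment (d_cg_edge_start).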
unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x;
if(tid<circuitVertexSize){
d_mark[ d_cg_edge_start[tid]]=d_cedgeCount[tid];
}
}
|
936689a03d001ba2bbe41b160a8ea32ac67d49c9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
*
* test.c
* [email protected]
*
* A place for trying out various code
*/
#include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
#include <hip/hip_runtime.h>
#define NPIXELS 100
static char daytab[2][13] = {
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
{0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}};
int day_of_year(int year, int month, int day);
unsigned char *landmask_d;
int main() {
// report on free mem
// checkCUDAMemory();
// allocate device memory for landmask
size_t landMemSize = NPIXELS * sizeof(char);
hipMalloc((void **)&landmask_d, landMemSize);
printf("allocating %ld device Kbytes for landmask\n", landMemSize/1024);
// checkCUDAError("memory allocation");
hipFree(landmask_d);
//printf("day of year:%d\n", day_of_year(2010, 8, 9) );
return 0;
}
int day_of_year(int year, int month, int day) {
int i, leap;
leap = year%4 == 0 && (year%100 != 0 || year%400 == 0);
for (i = 1; i < month; i++)
day += daytab[leap][i];
return day;
}
|
936689a03d001ba2bbe41b160a8ea32ac67d49c9.cu
|
/*
*
* test.c
* [email protected]
*
* A place for trying out various code
*/
#include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
#include <cuda.h>
#define NPIXELS 100
static char daytab[2][13] = {
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
{0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}};
int day_of_year(int year, int month, int day);
unsigned char *landmask_d;
int main() {
// report on free mem
// checkCUDAMemory();
// allocate device memory for landmask
size_t landMemSize = NPIXELS * sizeof(char);
cudaMalloc((void **)&landmask_d, landMemSize);
printf("allocating %ld device Kbytes for landmask\n", landMemSize/1024);
// checkCUDAError("memory allocation");
cudaFree(landmask_d);
//printf("day of year:%d\n", day_of_year(2010, 8, 9) );
return 0;
}
int day_of_year(int year, int month, int day) {
int i, leap;
leap = year%4 == 0 && (year%100 != 0 || year%400 == 0);
for (i = 1; i < month; i++)
day += daytab[leap][i];
return day;
}
|
9bba21a86fb17a15643201d93a6a9e4094b95e77.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html
#include <torch/types.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
template <typename scalar_t>
static __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref,
int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) {
int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x;
scalar_t zero = 0.0;
for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) {
scalar_t x = p_x[xi];
if (use_bias) {
x += p_b[(xi / step_b) % size_b];
}
scalar_t ref = use_ref ? p_ref[xi] : zero;
scalar_t y;
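// act*10 + grad selects the operation: act 1 behaves as identity/linear, act 3 as leaky ReLU
// with slope alpha; grad 0 is the forward value, grad 1 the first-order gradient (ref supplies
// the forward-pass reference used for branch selection), grad 2 the second order, which is zero here.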
switch (act * 10 + grad) {
default:
case 10: y = x; break;
case 11: y = x; break;
case 12: y = 0.0; break;
case 30: y = (x > 0.0) ? x : x * alpha; break;
case 31: y = (ref > 0.0) ? x : x * alpha; break;
case 32: y = 0.0; break;
}
out[xi] = y * scale;
}
}
torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
int act, int grad, float alpha, float scale) {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto x = input.contiguous();
auto b = bias.contiguous();
auto ref = refer.contiguous();
int use_bias = b.numel() ? 1 : 0;
int use_ref = ref.numel() ? 1 : 0;
int size_x = x.numel();
int size_b = b.numel();
int step_b = 1;
for (int i = 1 + 1; i < x.dim(); i++) {
step_b *= x.size(i);
}
int loop_x = 4;
int block_size = 4 * 32;
int grid_size = (size_x - 1) / (loop_x * block_size) + 1;
auto y = torch::empty_like(x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "fused_bias_act_kernel", [&] {
hipLaunchKernelGGL(( fused_bias_act_kernel<scalar_t>), dim3(grid_size), dim3(block_size), 0, stream,
y.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
ref.data_ptr<scalar_t>(),
act,
grad,
alpha,
scale,
loop_x,
size_x,
step_b,
size_b,
use_bias,
use_ref
);
});
return y;
}
|
9bba21a86fb17a15643201d93a6a9e4094b95e77.cu
|
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html
#include <torch/types.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
template <typename scalar_t>
static __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref,
int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) {
int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x;
scalar_t zero = 0.0;
for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) {
scalar_t x = p_x[xi];
if (use_bias) {
x += p_b[(xi / step_b) % size_b];
}
scalar_t ref = use_ref ? p_ref[xi] : zero;
scalar_t y;
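// act*10 + grad selects the operation: act 1 behaves as identity/linear, act 3 as leaky ReLU
// with slope alpha; grad 0 is the forward value, grad 1 the first-order gradient (ref supplies
// the forward-pass reference used for branch selection), grad 2 the second order, which is zero here.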
switch (act * 10 + grad) {
default:
case 10: y = x; break;
case 11: y = x; break;
case 12: y = 0.0; break;
case 30: y = (x > 0.0) ? x : x * alpha; break;
case 31: y = (ref > 0.0) ? x : x * alpha; break;
case 32: y = 0.0; break;
}
out[xi] = y * scale;
}
}
torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
int act, int grad, float alpha, float scale) {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto x = input.contiguous();
auto b = bias.contiguous();
auto ref = refer.contiguous();
int use_bias = b.numel() ? 1 : 0;
int use_ref = ref.numel() ? 1 : 0;
int size_x = x.numel();
int size_b = b.numel();
int step_b = 1;
for (int i = 1 + 1; i < x.dim(); i++) {
step_b *= x.size(i);
}
int loop_x = 4;
int block_size = 4 * 32;
int grid_size = (size_x - 1) / (loop_x * block_size) + 1;
auto y = torch::empty_like(x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "fused_bias_act_kernel", [&] {
fused_bias_act_kernel<scalar_t><<<grid_size, block_size, 0, stream>>>(
y.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
ref.data_ptr<scalar_t>(),
act,
grad,
alpha,
scale,
loop_x,
size_x,
step_b,
size_b,
use_bias,
use_ref
);
});
return y;
}
|
f45bf71835fd2ced3437d132ce105c10ab584032.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/Resize.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
namespace at {
namespace native {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t, typename IndexType, bool upper>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void triu_tril_kernel(
cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
const int64_t k,
const int64_t N) {
int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_idx >= N) {
return;
}
auto dims = self_info.dims;
IndexType self_offset = 0, result_offset = 0;
// Compute column index and corresponding offset
IndexType col = linear_idx % self_info.sizes[dims - 1];
linear_idx /= self_info.sizes[dims - 1];
self_offset += self_info.strides[dims - 1] * col;
result_offset += result_info.strides[dims - 1] * col;
// Compute row index and corresponding offset
IndexType row = linear_idx % self_info.sizes[dims - 2];
linear_idx /= self_info.sizes[dims - 2];
self_offset += self_info.strides[dims - 2] * row;
result_offset += result_info.strides[dims - 2] * row;
// Compute remaining offsets
IndexType running_index;
#pragma unroll
for (IndexType i = dims - 3; i >= 0; --i) {
running_index = linear_idx % self_info.sizes[i];
linear_idx /= self_info.sizes[i];
self_offset += running_index * self_info.strides[i];
result_offset += running_index * result_info.strides[i];
}
bool mask = upper ? (col - row >= k) : (col - row <= k);
result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}
template <bool upper>
Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) {
int64_t N = self.numel();
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, self.scalar_type(), "triu_tril_cuda_template", [&]{
if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int32_t, upper>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result_info, self_info, k, N);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int64_t, upper>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result_info, self_info, k, N);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
return result;
}
Tensor& tril_cuda_(Tensor &self, int64_t k) {
return tril_cuda_out(self, k, self);
}
Tensor& tril_cuda_out(const Tensor& self, int64_t k, Tensor &result) {
if (result.sizes() != self.sizes()) {
result.resize_as_(self);
}
if (self.numel() == 0) {
return result;
}
return triu_tril_cuda_template<false>(result, self, k, "tril");
}
Tensor& triu_cuda_(Tensor &self, int64_t k) {
return triu_cuda_out(self, k, self);
}
Tensor& triu_cuda_out(const Tensor& self, int64_t k, Tensor &result) {
if (result.sizes() != self.sizes()) {
result.resize_as_(self);
}
if (self.numel() == 0) {
return result;
}
return triu_tril_cuda_template<true>(result, self, k, "triu");
}
// Copy the kth diagonal of a matrix B to a vector A.
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_from_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideA) {
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t bOffset = start + strideSum * linearIndex;
a[strideA * linearIndex] = b[bOffset];
}
}
// Copy vector B to the kth diagonal of a matrix A
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_to_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideB) {
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t aOffset = start + strideSum * linearIndex;
a[aOffset] = b[strideB * linearIndex];
}
}
template <typename scalar_t>
Tensor& apply_diag(Tensor& result, const Tensor& self, int64_t dimension) {
TORCH_CHECK(
self.dim() == 1 || self.dim() == 2, "matrix or a vector expected");
TensorArg result_arg{result, "result", 1};
TensorArg self_arg{self, "self", 2};
checkAllSameGPU("diag", {result_arg, self_arg});
checkSameType("diag", result_arg, self_arg);
int nDimension = self.dim();
if (nDimension == 2) {
auto self_stride_0 = self.stride(0);
auto self_stride_1 = self.stride(1);
int sz;
if (dimension > 0) {
sz = ::min(self.size(0), self.size(1) - dimension);
} else {
sz = ::min(self.size(0) + dimension, self.size(1));
}
at::native::resize_output(result, {sz});
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride = result.stride(0);
const dim3 threads(::min(
int(sz),
int(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock)));
const dim3 grid(
::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * self_stride_1
: -dimension * self_stride_0);
// Kernel Launch
hipLaunchKernelGGL(( copy_from_diagonal_kernel<scalar_t>)
, dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
sz,
self_stride_0 + self_stride_1,
result_stride);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
} else {
auto n_elems = self.numel();
auto sz = (dimension > 0) ? n_elems + dimension : n_elems - dimension;
auto self_stride = self.stride(0);
at::native::resize_output(result, {sz, sz});
result.zero_();
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride_0 = result.stride(0);
auto result_stride_1 = result.stride(1);
const dim3 threads(::min(
int(sz), at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock));
const dim3 grid(
::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * result_stride_1
: -dimension * result_stride_0);
// Kernel Launch
hipLaunchKernelGGL(( copy_to_diagonal_kernel<scalar_t>)
, dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
n_elems,
result_stride_0 + result_stride_1,
self_stride);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
return result;
}
Tensor& diag_cuda_out(const Tensor& self, int64_t dimension, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool,
self.scalar_type(), "diag_cuda",
[&] {
apply_diag<scalar_t>(result, self, dimension);
});
return result;
}
Tensor trace_cuda(const Tensor& self) {
TORCH_CHECK(self.dim() == 2, "expected a matrix");
int dimension = 0;
auto result = at::diag(self, dimension);
return result.sum();
}
} // namespace native
} // namespace at
|
f45bf71835fd2ced3437d132ce105c10ab584032.cu
|
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/Resize.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
namespace at {
namespace native {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t, typename IndexType, bool upper>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void triu_tril_kernel(
cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
const int64_t k,
const int64_t N) {
int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_idx >= N) {
return;
}
auto dims = self_info.dims;
IndexType self_offset = 0, result_offset = 0;
// Compute column index and corresponding offset
IndexType col = linear_idx % self_info.sizes[dims - 1];
linear_idx /= self_info.sizes[dims - 1];
self_offset += self_info.strides[dims - 1] * col;
result_offset += result_info.strides[dims - 1] * col;
// Compute row index and corresponding offset
IndexType row = linear_idx % self_info.sizes[dims - 2];
linear_idx /= self_info.sizes[dims - 2];
self_offset += self_info.strides[dims - 2] * row;
result_offset += result_info.strides[dims - 2] * row;
// Compute remaining offsets
IndexType running_index;
#pragma unroll
for (IndexType i = dims - 3; i >= 0; --i) {
running_index = linear_idx % self_info.sizes[i];
linear_idx /= self_info.sizes[i];
self_offset += running_index * self_info.strides[i];
result_offset += running_index * result_info.strides[i];
}
bool mask = upper ? (col - row >= k) : (col - row <= k);
result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}
template <bool upper>
Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) {
int64_t N = self.numel();
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, self.scalar_type(), "triu_tril_cuda_template", [&]{
if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
triu_tril_kernel<scalar_t, int32_t, upper>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
result_info, self_info, k, N);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
triu_tril_kernel<scalar_t, int64_t, upper>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
result_info, self_info, k, N);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
return result;
}
Tensor& tril_cuda_(Tensor &self, int64_t k) {
return tril_cuda_out(self, k, self);
}
Tensor& tril_cuda_out(const Tensor& self, int64_t k, Tensor &result) {
if (result.sizes() != self.sizes()) {
result.resize_as_(self);
}
if (self.numel() == 0) {
return result;
}
return triu_tril_cuda_template<false>(result, self, k, "tril");
}
Tensor& triu_cuda_(Tensor &self, int64_t k) {
return triu_cuda_out(self, k, self);
}
Tensor& triu_cuda_out(const Tensor& self, int64_t k, Tensor &result) {
if (result.sizes() != self.sizes()) {
result.resize_as_(self);
}
if (self.numel() == 0) {
return result;
}
return triu_tril_cuda_template<true>(result, self, k, "triu");
}
// Copy the kth diagonal of a matrix B to a vector A.
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_from_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideA) {
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t bOffset = start + strideSum * linearIndex;
a[strideA * linearIndex] = b[bOffset];
}
}
// Copy vector B to the kth diagonal of a matrix A
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_to_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideB) {
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t aOffset = start + strideSum * linearIndex;
a[aOffset] = b[strideB * linearIndex];
}
}
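// Illustrative sketch (an assumption, not ATen code) of why start + strideSum * i
// walks the k-th diagonal: for a hypothetical contiguous n x n matrix with strides
// {n, 1}, element (i, i + k) of the k-th (k >= 0) super-diagonal sits at offset
// k + (n + 1) * i, i.e. start = k * stride1 and strideSum = stride0 + stride1.
template <typename scalar_t>
void copy_kth_diagonal_cpu_sketch(
    scalar_t* dst,
    const scalar_t* src,
    int64_t n,
    int64_t k,
    int64_t size) {
  const int64_t start = k; // k * stride1, with stride1 == 1
  const int64_t strideSum = n + 1; // stride0 + stride1
  for (int64_t i = 0; i < size; ++i) {
    dst[i] = src[start + strideSum * i];
  }
}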
template <typename scalar_t>
Tensor& apply_diag(Tensor& result, const Tensor& self, int64_t dimension) {
TORCH_CHECK(
self.dim() == 1 || self.dim() == 2, "matrix or a vector expected");
TensorArg result_arg{result, "result", 1};
TensorArg self_arg{self, "self", 2};
checkAllSameGPU("diag", {result_arg, self_arg});
checkSameType("diag", result_arg, self_arg);
int nDimension = self.dim();
if (nDimension == 2) {
auto self_stride_0 = self.stride(0);
auto self_stride_1 = self.stride(1);
int sz;
if (dimension > 0) {
sz = std::min(self.size(0), self.size(1) - dimension);
} else {
sz = std::min(self.size(0) + dimension, self.size(1));
}
at::native::resize_output(result, {sz});
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride = result.stride(0);
const dim3 threads(std::min(
int(sz),
int(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock)));
const dim3 grid(
std::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * self_stride_1
: -dimension * self_stride_0);
// Kernel Launch
copy_from_diagonal_kernel<scalar_t>
<<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
sz,
self_stride_0 + self_stride_1,
result_stride);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} else {
auto n_elems = self.numel();
auto sz = (dimension > 0) ? n_elems + dimension : n_elems - dimension;
auto self_stride = self.stride(0);
at::native::resize_output(result, {sz, sz});
result.zero_();
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride_0 = result.stride(0);
auto result_stride_1 = result.stride(1);
const dim3 threads(std::min(
int(sz), at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock));
const dim3 grid(
std::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * result_stride_1
: -dimension * result_stride_0);
// Kernel Launch
copy_to_diagonal_kernel<scalar_t>
<<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
n_elems,
result_stride_0 + result_stride_1,
self_stride);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
return result;
}
Tensor& diag_cuda_out(const Tensor& self, int64_t dimension, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool,
self.scalar_type(), "diag_cuda",
[&] {
apply_diag<scalar_t>(result, self, dimension);
});
return result;
}
Tensor trace_cuda(const Tensor& self) {
TORCH_CHECK(self.dim() == 2, "expected a matrix");
int dimension = 0;
auto result = at::diag(self, dimension);
return result.sum();
}
} // namespace native
} // namespace at
|
b212d06cae534e9d9930dab8dd78785310c0bb5e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* MIT License
*
* Copyright (c) 2016, Max Grossman, Computer Science Department, Rice University
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdio.h>
#include <assert.h>
#include <common.h>
__global__ void vector_add(int *C, int *A, int *B, int N, int repeat) {
int j;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
C[i] = A[i] + B[i];
for (j = 0; j < repeat - 1; j++) {
C[i] += A[i] + B[i];
}
}
}
void host_vector_add(int *C, int *A, int *B, int N, int repeat) {
for (int i = 0; i < N; i++) {
C[i] = A[i] + B[i];
for (int j = 0; j < repeat - 1; j++) {
C[i] += A[i] + B[i];
}
}
}
int main(int argc, char **argv) {
int i;
int N = 1024;
int ntiles = 4;
int repeat = 20;
const int threads_per_block = 128;
if (argc > 1) {
N = atoi(argv[1]);
}
if (argc > 2) {
ntiles = atoi(argv[2]);
}
if (argc > 3) {
repeat = atoi(argv[3]);
}
const int tile_size = (N + ntiles - 1) / ntiles;
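    // Worked example (illustrative): for N = 1026 and ntiles = 4 this gives
    // tile_size = (1026 + 3) / 4 = 257, so the tiles cover 257, 257, 257 and 255
    // elements; the clamping of tile_end against N below handles the short last tile.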
int *A, *B, *C;
int *d_A, *d_B, *d_C;
// Allocate space on the host for each array
CHECK_CUDA(hipHostMalloc((void **)&A, N * sizeof(int)));
CHECK_CUDA(hipHostMalloc((void **)&B, N * sizeof(int)));
CHECK_CUDA(hipHostMalloc((void **)&C, N * sizeof(int)));
// Allocate space on the device for each array
CHECK_CUDA(hipMalloc(&d_A, N * sizeof(int)));
CHECK_CUDA(hipMalloc(&d_B, N * sizeof(int)));
CHECK_CUDA(hipMalloc(&d_C, N * sizeof(int)));
// Populate host arrays
for (i = 0; i < N; i++) {
A[i] = i;
B[i] = 2 * i;
C[i] = 0;
}
hipStream_t *streams = (hipStream_t *)malloc(ntiles * sizeof(hipStream_t));
for (int t = 0; t < ntiles; t++) {
CHECK_CUDA(hipStreamCreate(streams + t));
}
const unsigned long long start_device = current_time_ns();
for (int t = 0; t < ntiles; t++) {
const int tile_start = t * tile_size;
int tile_end = (t + 1) * tile_size;
if (tile_end > N) tile_end = N;
const int tile_size = tile_end - tile_start;
CHECK_CUDA(hipMemcpyAsync(d_A + tile_start, A + tile_start,
tile_size * sizeof(int), hipMemcpyHostToDevice,
streams[t]));
}
for (int t = 0; t < ntiles; t++) {
const int tile_start = t * tile_size;
int tile_end = (t + 1) * tile_size;
if (tile_end > N) tile_end = N;
const int tile_size = tile_end - tile_start;
CHECK_CUDA(hipMemcpyAsync(d_B + tile_start, B + tile_start,
tile_size * sizeof(int), hipMemcpyHostToDevice,
streams[t]));
}
for (int t = 0; t < ntiles; t++) {
const int tile_start = t * tile_size;
int tile_end = (t + 1) * tile_size;
if (tile_end > N) tile_end = N;
const int tile_size = tile_end - tile_start;
CHECK_CUDA(hipMemcpyAsync(d_C + tile_start, C + tile_start,
tile_size * sizeof(int), hipMemcpyHostToDevice,
streams[t]));
}
const int nblocks = (N + threads_per_block - 1) / threads_per_block;
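    // Note: nblocks is sized for the full N rather than for a single tile, so each
    // per-tile launch starts more blocks than strictly needed; the i < N guard in
    // vector_add (invoked here with N = tile_size) turns the surplus threads into no-ops.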
for (int t = 0; t < ntiles; t++) {
const int tile_start = t * tile_size;
int tile_end = (t + 1) * tile_size;
if (tile_end > N) tile_end = N;
const int tile_size = tile_end - tile_start;
        hipLaunchKernelGGL(( vector_add), dim3(nblocks), dim3(threads_per_block), 0, streams[t],
d_C + tile_start, d_B + tile_start, d_A + tile_start, tile_size,
repeat);
}
// Transfer the contents of the output array back to the host
for (int t = 0; t < ntiles; t++) {
const int tile_start = t * tile_size;
int tile_end = (t + 1) * tile_size;
if (tile_end > N) tile_end = N;
const int tile_size = tile_end - tile_start;
CHECK_CUDA(hipMemcpyAsync(C + tile_start, d_C + tile_start,
tile_size * sizeof(int), hipMemcpyDeviceToHost,
streams[t]));
}
CHECK_CUDA(hipDeviceSynchronize());
const unsigned long long end_device = current_time_ns();
// Validate GPU results
for (i = 0; i < N; i++) {
if (C[i] != repeat * (A[i] + B[i])) {
fprintf(stderr, "Mismatch at index %d: expected %d but got %d\n", i,
repeat * (A[i] + B[i]), C[i]);
return 1;
}
}
// Run on the host
const unsigned long long start_host = current_time_ns();
host_vector_add(C, B, A, N, repeat);
const unsigned long long end_host = current_time_ns();
const unsigned long long elapsed_device = (end_device - start_device) / 1000;
const unsigned long long elapsed_host = (end_host - start_host) / 1000;
printf("Finished! All %d elements validate using %d threads per block.\n",
N, threads_per_block);
printf("Took %llu microseconds on the host\n", elapsed_host);
printf("Took %llu microseconds on the device, %2.5fx speedup\n",
elapsed_device, (double)elapsed_host / (double)elapsed_device);
return 0;
}
|
b212d06cae534e9d9930dab8dd78785310c0bb5e.cu
|
/*
* MIT License
*
* Copyright (c) 2016, Max Grossman, Computer Science Department, Rice University
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdio.h>
#include <assert.h>
#include <common.h>
__global__ void vector_add(int *C, int *A, int *B, int N, int repeat) {
int j;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
C[i] = A[i] + B[i];
for (j = 0; j < repeat - 1; j++) {
C[i] += A[i] + B[i];
}
}
}
void host_vector_add(int *C, int *A, int *B, int N, int repeat) {
for (int i = 0; i < N; i++) {
C[i] = A[i] + B[i];
for (int j = 0; j < repeat - 1; j++) {
C[i] += A[i] + B[i];
}
}
}
int main(int argc, char **argv) {
int i;
int N = 1024;
int ntiles = 4;
int repeat = 20;
const int threads_per_block = 128;
if (argc > 1) {
N = atoi(argv[1]);
}
if (argc > 2) {
ntiles = atoi(argv[2]);
}
if (argc > 3) {
repeat = atoi(argv[3]);
}
const int tile_size = (N + ntiles - 1) / ntiles;
int *A, *B, *C;
int *d_A, *d_B, *d_C;
// Allocate space on the host for each array
CHECK_CUDA(cudaMallocHost((void **)&A, N * sizeof(int)));
CHECK_CUDA(cudaMallocHost((void **)&B, N * sizeof(int)));
CHECK_CUDA(cudaMallocHost((void **)&C, N * sizeof(int)));
// Allocate space on the device for each array
CHECK_CUDA(cudaMalloc(&d_A, N * sizeof(int)));
CHECK_CUDA(cudaMalloc(&d_B, N * sizeof(int)));
CHECK_CUDA(cudaMalloc(&d_C, N * sizeof(int)));
// Populate host arrays
for (i = 0; i < N; i++) {
A[i] = i;
B[i] = 2 * i;
C[i] = 0;
}
cudaStream_t *streams = (cudaStream_t *)malloc(ntiles * sizeof(cudaStream_t));
for (int t = 0; t < ntiles; t++) {
CHECK_CUDA(cudaStreamCreate(streams + t));
}
const unsigned long long start_device = current_time_ns();
for (int t = 0; t < ntiles; t++) {
const int tile_start = t * tile_size;
int tile_end = (t + 1) * tile_size;
if (tile_end > N) tile_end = N;
const int tile_size = tile_end - tile_start;
CHECK_CUDA(cudaMemcpyAsync(d_A + tile_start, A + tile_start,
tile_size * sizeof(int), cudaMemcpyHostToDevice,
streams[t]));
}
for (int t = 0; t < ntiles; t++) {
const int tile_start = t * tile_size;
int tile_end = (t + 1) * tile_size;
if (tile_end > N) tile_end = N;
const int tile_size = tile_end - tile_start;
CHECK_CUDA(cudaMemcpyAsync(d_B + tile_start, B + tile_start,
tile_size * sizeof(int), cudaMemcpyHostToDevice,
streams[t]));
}
for (int t = 0; t < ntiles; t++) {
const int tile_start = t * tile_size;
int tile_end = (t + 1) * tile_size;
if (tile_end > N) tile_end = N;
const int tile_size = tile_end - tile_start;
CHECK_CUDA(cudaMemcpyAsync(d_C + tile_start, C + tile_start,
tile_size * sizeof(int), cudaMemcpyHostToDevice,
streams[t]));
}
const int nblocks = (N + threads_per_block - 1) / threads_per_block;
for (int t = 0; t < ntiles; t++) {
const int tile_start = t * tile_size;
int tile_end = (t + 1) * tile_size;
if (tile_end > N) tile_end = N;
const int tile_size = tile_end - tile_start;
vector_add<<<nblocks, threads_per_block, 0, streams[t]>>>(
d_C + tile_start, d_B + tile_start, d_A + tile_start, tile_size,
repeat);
}
// Transfer the contents of the output array back to the host
for (int t = 0; t < ntiles; t++) {
const int tile_start = t * tile_size;
int tile_end = (t + 1) * tile_size;
if (tile_end > N) tile_end = N;
const int tile_size = tile_end - tile_start;
CHECK_CUDA(cudaMemcpyAsync(C + tile_start, d_C + tile_start,
tile_size * sizeof(int), cudaMemcpyDeviceToHost,
streams[t]));
}
CHECK_CUDA(cudaDeviceSynchronize());
const unsigned long long end_device = current_time_ns();
// Validate GPU results
for (i = 0; i < N; i++) {
if (C[i] != repeat * (A[i] + B[i])) {
fprintf(stderr, "Mismatch at index %d: expected %d but got %d\n", i,
repeat * (A[i] + B[i]), C[i]);
return 1;
}
}
// Run on the host
const unsigned long long start_host = current_time_ns();
host_vector_add(C, B, A, N, repeat);
const unsigned long long end_host = current_time_ns();
const unsigned long long elapsed_device = (end_device - start_device) / 1000;
const unsigned long long elapsed_host = (end_host - start_host) / 1000;
printf("Finished! All %d elements validate using %d threads per block.\n",
N, threads_per_block);
printf("Took %llu microseconds on the host\n", elapsed_host);
printf("Took %llu microseconds on the device, %2.5fx speedup\n",
elapsed_device, (double)elapsed_host / (double)elapsed_device);
return 0;
}
|
2baed0f3bad4a90f67697c8a6bc3bbac9194789f.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef opencl_translator_cu // pragma once
#define opencl_translator_cu
#ifdef __NVCC__
#ifndef STATIC_KEYWORD
#define STATIC_KEYWORD __device__
#endif
// See https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/functionQualifiers.html
#define vec_type_hint(typen)
#define work_group_size_hint(X, Y, Z)
#define reqd_work_group_size(X, Y, Z)
#define __kernel __global__
#define __global
#define __local __shared__
#define __constant __constant__
typedef unsigned int uint;
// https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/barrier.html
enum cl_mem_fence_flags
{
CLK_LOCAL_MEM_FENCE,
CLK_GLOBAL_MEM_FENCE
};
STATIC_KEYWORD void barrier(cl_mem_fence_flags flags)
{
__syncthreads();
}
// https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/workItemFunctions.html
STATIC_KEYWORD size_t getXYZByIndex(dim3 xyz, uint dimindx)
{
if (dimindx == 2) {
return xyz.z;
} else if (dimindx == 1) {
return xyz.y;
} else {
return xyz.x;
}
}
STATIC_KEYWORD size_t get_global_size (uint dimindx) {
return getXYZByIndex(gridDim, dimindx) * getXYZByIndex(blockDim, dimindx);
}
STATIC_KEYWORD size_t get_global_id (uint dimindx) {
return getXYZByIndex(blockIdx, dimindx) * getXYZByIndex(blockDim, dimindx) + getXYZByIndex(threadIdx, dimindx);
}
STATIC_KEYWORD size_t get_local_size (uint dimindx) {
return getXYZByIndex(blockDim, dimindx);
}
STATIC_KEYWORD size_t get_local_id (uint dimindx) {
return getXYZByIndex(threadIdx, dimindx);
}
STATIC_KEYWORD size_t get_num_groups (uint dimindx) {
return getXYZByIndex(gridDim, dimindx);
}
STATIC_KEYWORD size_t get_group_id (uint dimindx) {
return getXYZByIndex(blockIdx, dimindx);
}
STATIC_KEYWORD uint get_work_dim()
{
if (get_global_size(2) > 1) {
return 3;
} else if (get_global_size(1) > 1) {
return 2;
} else {
return 1;
}
}
#define WARP_SIZE 32
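// Usage sketch (illustrative, not part of the translator): with the macros above an
// OpenCL-style kernel compiles directly under nvcc. __kernel expands to __global__,
// __global to nothing, and get_global_id(0) maps to blockIdx.x * blockDim.x + threadIdx.x.
// The guard macro OPENCL_TRANSLATOR_EXAMPLE is hypothetical and only exists to keep
// this sketch out of normal builds.
#ifdef OPENCL_TRANSLATOR_EXAMPLE
__kernel void scale_example(__global float* data, float factor, uint n)
{
    size_t i = get_global_id(0);
    if (i < n) {
        data[i] = data[i] * factor;
    }
}
#endif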
#endif
#ifdef __CUDA_ARCH__
#define DEVICE_CODE
#else
#define HOST_CODE
#endif
#include <libgpu/work_size.h>
#include <libgpu/shared_device_buffer.h>
#include <libgpu/cuda/utils.h>
#include <hip/hip_runtime_api.h>
#endif // pragma once
|
2baed0f3bad4a90f67697c8a6bc3bbac9194789f.cu
|
#ifndef opencl_translator_cu // pragma once
#define opencl_translator_cu
#ifdef __NVCC__
#ifndef STATIC_KEYWORD
#define STATIC_KEYWORD __device__
#endif
// See https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/functionQualifiers.html
#define vec_type_hint(typen)
#define work_group_size_hint(X, Y, Z)
#define reqd_work_group_size(X, Y, Z)
#define __kernel __global__
#define __global
#define __local __shared__
#define __constant __constant__
typedef unsigned int uint;
// https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/barrier.html
enum cl_mem_fence_flags
{
CLK_LOCAL_MEM_FENCE,
CLK_GLOBAL_MEM_FENCE
};
STATIC_KEYWORD void barrier(cl_mem_fence_flags flags)
{
__syncthreads();
}
// https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/workItemFunctions.html
STATIC_KEYWORD size_t getXYZByIndex(dim3 xyz, uint dimindx)
{
if (dimindx == 2) {
return xyz.z;
} else if (dimindx == 1) {
return xyz.y;
} else {
return xyz.x;
}
}
STATIC_KEYWORD size_t get_global_size (uint dimindx) {
return getXYZByIndex(gridDim, dimindx) * getXYZByIndex(blockDim, dimindx);
}
STATIC_KEYWORD size_t get_global_id (uint dimindx) {
return getXYZByIndex(blockIdx, dimindx) * getXYZByIndex(blockDim, dimindx) + getXYZByIndex(threadIdx, dimindx);
}
STATIC_KEYWORD size_t get_local_size (uint dimindx) {
return getXYZByIndex(blockDim, dimindx);
}
STATIC_KEYWORD size_t get_local_id (uint dimindx) {
return getXYZByIndex(threadIdx, dimindx);
}
STATIC_KEYWORD size_t get_num_groups (uint dimindx) {
return getXYZByIndex(gridDim, dimindx);
}
STATIC_KEYWORD size_t get_group_id (uint dimindx) {
return getXYZByIndex(blockIdx, dimindx);
}
STATIC_KEYWORD uint get_work_dim()
{
if (get_global_size(2) > 1) {
return 3;
} else if (get_global_size(1) > 1) {
return 2;
} else {
return 1;
}
}
#define WARP_SIZE 32
#endif
#ifdef __CUDA_ARCH__
#define DEVICE_CODE
#else
#define HOST_CODE
#endif
#include <libgpu/work_size.h>
#include <libgpu/shared_device_buffer.h>
#include <libgpu/cuda/utils.h>
#include <cuda_runtime_api.h>
#endif // pragma once
|
70ebe02dcac3ebac6ca326abfb09dc6bf76130b8.hip
|
// !!! This is a file automatically generated by hipify!!!
// #CSCS CUDA Training
//
// #Example 2.0 - sum vectors, fix number of threads per block
//
// #Author Ugo Varetto
//
// #Goal: compute the element-wise sum of two 1D vectors using a number of GPU threads greater than or equal to
// the number of vector elements and not evenly divisible by the block size
// #Rationale: shows how to implement a kernel with a computation/memory configuration that matches
// the domain layout. Each thread computes at most one element of the output vector.
//
// #Solution:
// . number of elements in the output array = E
// . number of threads per block = Tb
// The number of blocks is = ( E + Tb - 1 ) div Tb where 'div' is the integer division operator
// Each thread on the GPU computes one(thread id < vector size) or zero(thread id >= vector size)
// elements of the output vector.
//
//
// #Code: typical flow:
// 1) compute launch grid configuration
// 2) allocate data on host(cpu) and device(gpu)
// 3) copy data from host to device
// 4) launch kernel
// 5) read data back
// 6) consume data (in this case print result)
// 7) free memory
//
// #Compilation: nvcc -arch=sm_13 2_0_sum-vectors.cu -o sum-vectors-1
//
// #Execution: ./sum-vectors-1
//
// #Note: kernel invocations ( foo<<<...>>>(...) ) are *always* asynchronous and a call to
// hipDeviceSynchronize() is required to wait for the end of kernel execution from
// a host thread; in case of synchronous copy operations like hipMemcpy(..., hipMemcpyDeviceToHost)
// kernel execution is guaranteed to be terminated before data are copied
//
// #Note: the code is C++ also because the default compilation mode for CUDA is C++, all functions
// are named with C++ convention and the syntax is checked by default against C++ grammar rules
//
// #Note: -arch=sm_13 allows the code to run on every card with hw architecture GT200 (gtx 2xx) or better
//
// #Note: -arch=sm_13 is the lowest architecture version that supports double precision
//
// #Note: the example can be extended to read configuration data and array size from the command line
// and could be timed to investigate how performance is dependent on single/double precision
// and thread block size
//#include <hip/hip_runtime.h> // automatically added by nvcc
#include <vector>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <string>
typedef float real_t;
// In this case the kernel assumes that the computation was started with enough threads to cover the entire domain.
// This is the preferred solution provided enough threads can be launched, which might not be possible with a
// 1D grid layout (max number of threads = 512 threads per block x 65536 blocks = 2^25 = 32 Mi threads)
__global__ void sum_vectors( const real_t* v1, const real_t* v2, real_t* out, size_t num_elements ) {
// compute current thread id
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    // since we assume that num threads >= num elements we need to make sure we do not write outside the
// range of the output buffer
if( xIndex < num_elements ) out[ xIndex ] = v1[ xIndex ] + v2[ xIndex ];
}
//------------------------------------------------------------------------------
int main( int , char** ) {
const int VECTOR_SIZE = 0x10000 + 1; //vector size 65537
const int SIZE = sizeof( real_t ) * VECTOR_SIZE; // total size in bytes
const int THREADS_PER_BLOCK = 32; //number of gpu threads per block
    // total thread count: the number of threads per block multiplied by the number of blocks
    // must be at least equal to VECTOR_SIZE
    const int NUMBER_OF_BLOCKS = ( VECTOR_SIZE + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
    // if the vector size is not evenly divisible by the number of threads per block
    // we need an additional block; the above code can be rewritten as
    // if( VECTOR_SIZE % THREADS_PER_BLOCK == 0 ) NUMBER_OF_BLOCKS = VECTOR_SIZE / THREADS_PER_BLOCK;
    // else NUMBER_OF_BLOCKS = VECTOR_SIZE / THREADS_PER_BLOCK + 1
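    // Worked instance (illustrative): with VECTOR_SIZE = 65537 and THREADS_PER_BLOCK = 32,
    // NUMBER_OF_BLOCKS = (65537 + 31) / 32 = 2049 blocks, i.e. 65568 threads in total,
    // of which the last 31 fail the xIndex < num_elements test in the kernel and do nothing.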
// host allocated storage; use std vectors to simplify memory management
// and initialization
std::vector< real_t > v1 ( VECTOR_SIZE, 1.f ); //initialize all elements to 1
std::vector< real_t > v2 ( VECTOR_SIZE, 2.f ); //initialize all elements to 2
std::vector< real_t > vout( VECTOR_SIZE, 0.f ); //initialize all elements to 0
// gpu allocated storage
real_t* dev_in1 = 0; //vector 1
real_t* dev_in2 = 0; //vector 2
real_t* dev_out = 0; //result value
hipMalloc( &dev_in1, SIZE );
hipMalloc( &dev_in2, SIZE );
hipMalloc( &dev_out, SIZE );
// copy data to GPU
hipMemcpy( dev_in1, &v1[ 0 ], SIZE, hipMemcpyHostToDevice );
hipMemcpy( dev_in2, &v2[ 0 ], SIZE, hipMemcpyHostToDevice );
// execute kernel with num threads >= num elements
hipLaunchKernelGGL(( sum_vectors), dim3(NUMBER_OF_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, dev_in1, dev_in2, dev_out, VECTOR_SIZE );
// read back result
hipMemcpy( &vout[ 0 ], dev_out, SIZE, hipMemcpyDeviceToHost );
// print first and last element of vector
std::cout << "result: " << vout.front() << ".." << vout.back() << std::endl;
// free memory
hipFree( dev_in1 );
hipFree( dev_in2 );
hipFree( dev_out );
return 0;
}
|
70ebe02dcac3ebac6ca326abfb09dc6bf76130b8.cu
|
// #CSCS CUDA Training
//
// #Example 2.0 - sum vectors, fix number of threads per block
//
// #Author Ugo Varetto
//
// #Goal: compute the element-wise sum of two 1D vectors using a number of GPU threads greater than or equal to
// the number of vector elements and not evenly divisible by the block size
// #Rationale: shows how to implement a kernel with a computation/memory configuration that matches
// the domain layout. Each thread computes at most one element of the output vector.
//
// #Solution:
// . number of elements in the output array = E
// . number of threads per block = Tb
// The number of blocks is = ( E + Tb - 1 ) div Tb where 'div' is the integer division operator
// Each thread on the GPU computes one(thread id < vector size) or zero(thread id >= vector size)
// elements of the output vector.
//
//
// #Code: typical flow:
// 1) compute launch grid configuration
// 2) allocate data on host(cpu) and device(gpu)
// 3) copy data from host to device
// 4) launch kernel
// 5) read data back
// 6) consume data (in this case print result)
// 7) free memory
//
// #Compilation: nvcc -arch=sm_13 2_0_sum-vectors.cu -o sum-vectors-1
//
// #Execution: ./sum-vectors-1
//
// #Note: kernel invocations ( foo<<<...>>>(...) ) are *always* asynchronous and a call to
// cudaThreadSynchronize() is required to wait for the end of kernel execution from
// a host thread; in case of synchronous copy operations like cudaMemcpy(..., cudaMemcpyDeviceToHost)
// kernel execution is guaranteed to be terminated before data are copied
//
// #Note: the code is C++ also because the default compilation mode for CUDA is C++, all functions
// are named with C++ convention and the syntax is checked by default against C++ grammar rules
//
// #Note: -arch=sm_13 allows the code to run on every card with hw architecture GT200 (gtx 2xx) or better
//
// #Note: -arch=sm_13 is the lowest architecture version that supports double precision
//
// #Note: the example can be extended to read configuration data and array size from the command line
// and could be timed to investigate how performance is dependent on single/double precision
// and thread block size
//#include <cuda_runtime.h> // automatically added by nvcc
#include <vector>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <string>
typedef float real_t;
// In this case the kernel assumes that the computation was started with enough threads to cover the entire domain.
// This is the preferred solution provided enough threads can be launched, which might not be possible with a
// 1D grid layout (max number of threads = 512 threads per block x 65536 blocks = 2^25 = 32 Mi threads)
__global__ void sum_vectors( const real_t* v1, const real_t* v2, real_t* out, size_t num_elements ) {
// compute current thread id
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    // since we assume that num threads >= num elements we need to make sure we do not write outside the
// range of the output buffer
if( xIndex < num_elements ) out[ xIndex ] = v1[ xIndex ] + v2[ xIndex ];
}
//------------------------------------------------------------------------------
int main( int , char** ) {
const int VECTOR_SIZE = 0x10000 + 1; //vector size 65537
const int SIZE = sizeof( real_t ) * VECTOR_SIZE; // total size in bytes
const int THREADS_PER_BLOCK = 32; //number of gpu threads per block
    // total thread count: the number of threads per block multiplied by the number of blocks
    // must be at least equal to VECTOR_SIZE
    const int NUMBER_OF_BLOCKS = ( VECTOR_SIZE + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
    // if the vector size is not evenly divisible by the number of threads per block
    // we need an additional block; the above code can be rewritten as
    // if( VECTOR_SIZE % THREADS_PER_BLOCK == 0 ) NUMBER_OF_BLOCKS = VECTOR_SIZE / THREADS_PER_BLOCK;
    // else NUMBER_OF_BLOCKS = VECTOR_SIZE / THREADS_PER_BLOCK + 1
// host allocated storage; use std vectors to simplify memory management
// and initialization
std::vector< real_t > v1 ( VECTOR_SIZE, 1.f ); //initialize all elements to 1
std::vector< real_t > v2 ( VECTOR_SIZE, 2.f ); //initialize all elements to 2
std::vector< real_t > vout( VECTOR_SIZE, 0.f ); //initialize all elements to 0
// gpu allocated storage
real_t* dev_in1 = 0; //vector 1
real_t* dev_in2 = 0; //vector 2
real_t* dev_out = 0; //result value
cudaMalloc( &dev_in1, SIZE );
cudaMalloc( &dev_in2, SIZE );
cudaMalloc( &dev_out, SIZE );
// copy data to GPU
cudaMemcpy( dev_in1, &v1[ 0 ], SIZE, cudaMemcpyHostToDevice );
cudaMemcpy( dev_in2, &v2[ 0 ], SIZE, cudaMemcpyHostToDevice );
// execute kernel with num threads >= num elements
sum_vectors<<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK>>>( dev_in1, dev_in2, dev_out, VECTOR_SIZE );
// read back result
cudaMemcpy( &vout[ 0 ], dev_out, SIZE, cudaMemcpyDeviceToHost );
// print first and last element of vector
std::cout << "result: " << vout.front() << ".." << vout.back() << std::endl;
// free memory
cudaFree( dev_in1 );
cudaFree( dev_in2 );
cudaFree( dev_out );
return 0;
}
|
0ccccb0d66fdf6769f4ea843d6e9663ac6baacb4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <chrono>
#include <ctime>
#include <limits.h>
using namespace std;
__global__
void cadd(int n, float*x, float *y){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i+= stride){
y[i] = x[i] + y[i];
}
}
float* add(int n, float *x, float *y){
for(int i = 0; i < n; i++){
y[i] = x[i] + y[i];
}
return y;
}
int main(void){
int N = 1<<20;//INT_MAX/10;
float *x = new float[N];
float *y = new float[N];
for(int i =0; i < N; i++){
x[i] = i+1.0f;
y[i] = i + 2.0f;
}
//CPU
auto start = chrono::system_clock::now();
float *z = add(N,x,y);
auto end = chrono::system_clock::now();
chrono::duration<double> timevar = end-start;
std::cout << "Time to add: " << timevar.count() << std::endl;
// for(int i = 0; i < sizeof(*z); i ++){
// std::cout << x[i] << " + " << y[i] << " = " << z[i] << endl;
// }
delete [] x;
delete [] y;
//GPU
auto cstart = chrono::system_clock::now();
float *cx, *cy;
hipMallocManaged(&cx, N*sizeof(float));
hipMallocManaged(&cy, N*sizeof(float));
for(int i =0; i <N;i++){
cx[i] = i + 1.0f;
cy[i] = i + 2.0f;
}
int blocksize = 256;
int numBlocks = (N + blocksize -1)/blocksize;
  hipLaunchKernelGGL(( cadd), dim3(numBlocks), dim3(blocksize), 0, 0, N, cx, cy); // launch on the managed buffers; x and y were deleted above
hipDeviceSynchronize();
auto cend = chrono::system_clock::now();
chrono::duration<double> ctimevar = cend-cstart;
cout << "time to complete: " << ctimevar.count() << " seconds" << endl;
// for(int i =0; i < sizeof(*z); i++){
// cout << cx[i] << " + " << cy[i] << " = " << cz[i] << endl;
// }
  hipFree(cx);
  hipFree(cy);
return 0;
}
|
0ccccb0d66fdf6769f4ea843d6e9663ac6baacb4.cu
|
#include <iostream>
#include <math.h>
#include <chrono>
#include <ctime>
#include <limits.h>
using namespace std;
__global__
void cadd(int n, float*x, float *y){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i+= stride){
y[i] = x[i] + y[i];
}
}
float* add(int n, float *x, float *y){
for(int i = 0; i < n; i++){
y[i] = x[i] + y[i];
}
return y;
}
int main(void){
int N = 1<<20;//INT_MAX/10;
float *x = new float[N];
float *y = new float[N];
for(int i =0; i < N; i++){
x[i] = i+1.0f;
y[i] = i + 2.0f;
}
//CPU
auto start = chrono::system_clock::now();
float *z = add(N,x,y);
auto end = chrono::system_clock::now();
chrono::duration<double> timevar = end-start;
std::cout << "Time to add: " << timevar.count() << std::endl;
// for(int i = 0; i < sizeof(*z); i ++){
// std::cout << x[i] << " + " << y[i] << " = " << z[i] << endl;
// }
delete [] x;
delete [] y;
//GPU
auto cstart = chrono::system_clock::now();
float *cx, *cy;
cudaMallocManaged(&cx, N*sizeof(float));
cudaMallocManaged(&cy, N*sizeof(float));
for(int i =0; i <N;i++){
cx[i] = i + 1.0f;
cy[i] = i + 2.0f;
}
int blocksize = 256;
int numBlocks = (N + blocksize -1)/blocksize;
  cadd<<<numBlocks, blocksize>>>(N, cx, cy); // launch on the managed buffers; x and y were deleted above
cudaDeviceSynchronize();
auto cend = chrono::system_clock::now();
chrono::duration<double> ctimevar = cend-cstart;
cout << "time to complete: " << ctimevar.count() << " seconds" << endl;
// for(int i =0; i < sizeof(*z); i++){
// cout << cx[i] << " + " << cy[i] << " = " << cz[i] << endl;
// }
  cudaFree(cx);
  cudaFree(cy);
return 0;
}
|
e3bda4a62c38fcc7c457fec397294d4d0733d784.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mean_array_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *src = NULL;
hipMalloc(&src, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
float alpha = 2;
float *avg = NULL;
hipMalloc(&avg, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(mean_array_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, src, size, alpha, avg);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(mean_array_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, src, size, alpha, avg);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(mean_array_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, src, size, alpha, avg);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
e3bda4a62c38fcc7c457fec397294d4d0733d784.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mean_array_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
float alpha = 2;
float *avg = NULL;
cudaMalloc(&avg, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mean_array_kernel<<<gridBlock,threadBlock>>>(src,size,alpha,avg);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mean_array_kernel<<<gridBlock,threadBlock>>>(src,size,alpha,avg);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mean_array_kernel<<<gridBlock,threadBlock>>>(src,size,alpha,avg);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
721522dfe7d024eeee8c251c3cc1bce93ee591de.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
721522dfe7d024eeee8c251c3cc1bce93ee591de.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
9cb67dc4af42ffd78f5bc1d4d486222d6459b590.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template <typename T> __global__ void kernelgpuSource(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uinf, T *param, T time, int modelnumber, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i<ng) {
T param1 = param[0];
T param5 = param[4];
T param6 = param[5];
T param12 = param[11];
T param14 = param[13];
T param15 = param[14];
T param16 = param[15];
T uinf1 = uinf[0];
T uinf2 = uinf[1];
T uinf3 = uinf[2];
T uinf4 = uinf[3];
T uinf5 = uinf[4];
T uinf6 = uinf[5];
T uinf7 = uinf[6];
T uinf8 = uinf[7];
T uinf9 = uinf[8];
T uinf10 = uinf[9];
T uinf11 = uinf[10];
T uinf12 = uinf[11];
T uinf13 = uinf[12];
T uinf14 = uinf[13];
T uinf15 = uinf[14];
T uinf16 = uinf[15];
T uinf17 = uinf[16];
T uinf18 = uinf[17];
T uinf19 = uinf[18];
T uinf20 = uinf[19];
T uinf21 = uinf[20];
T uinf22 = uinf[21];
T uinf23 = uinf[22];
T uinf24 = uinf[23];
T uinf25 = uinf[24];
T uinf26 = uinf[25];
T uinf27 = uinf[26];
T uinf28 = uinf[27];
T uinf29 = uinf[28];
T uinf30 = uinf[29];
T uinf31 = uinf[30];
T uinf32 = uinf[31];
T uinf33 = uinf[32];
T uinf34 = uinf[33];
T uinf35 = uinf[34];
T uinf36 = uinf[35];
T uinf37 = uinf[36];
T uinf38 = uinf[37];
T uinf39 = uinf[38];
T uinf40 = uinf[39];
T uinf41 = uinf[40];
T uinf42 = uinf[41];
T uinf43 = uinf[42];
T uinf44 = uinf[43];
T uinf45 = uinf[44];
T uinf46 = uinf[45];
T uinf47 = uinf[46];
T uinf48 = uinf[47];
T uinf49 = uinf[48];
T uinf50 = uinf[49];
T uinf51 = uinf[50];
T uinf52 = uinf[51];
T uinf53 = uinf[52];
T uinf54 = uinf[53];
T uinf55 = uinf[54];
T uinf56 = uinf[55];
T uinf57 = uinf[56];
T uinf58 = uinf[57];
T uinf59 = uinf[58];
T uinf60 = uinf[59];
T uinf61 = uinf[60];
T uinf62 = uinf[61];
T uinf63 = uinf[62];
T uinf64 = uinf[63];
T uinf65 = uinf[64];
T uinf66 = uinf[65];
T uinf67 = uinf[66];
T uinf68 = uinf[67];
T uinf69 = uinf[68];
T uinf70 = uinf[69];
T uinf71 = uinf[70];
T uinf72 = uinf[71];
T uinf73 = uinf[72];
T uinf74 = uinf[73];
T uinf75 = uinf[74];
T uinf76 = uinf[75];
T uinf77 = uinf[76];
T uinf78 = uinf[77];
T uinf79 = uinf[78];
T uinf80 = uinf[79];
T uinf81 = uinf[80];
T uinf82 = uinf[81];
T uinf83 = uinf[82];
T uinf84 = uinf[83];
T uinf85 = uinf[84];
T uinf86 = uinf[85];
T uinf87 = uinf[86];
T uinf88 = uinf[87];
T uinf89 = uinf[88];
T uinf90 = uinf[89];
T uinf91 = uinf[90];
T uinf92 = uinf[91];
T uinf93 = uinf[92];
T uinf94 = uinf[93];
T uinf95 = uinf[94];
T uinf96 = uinf[95];
T uinf97 = uinf[96];
T uinf98 = uinf[97];
T uinf99 = uinf[98];
T uinf100 = uinf[99];
T uinf101 = uinf[100];
T uinf102 = uinf[101];
T uinf103 = uinf[102];
T uinf104 = uinf[103];
T uinf105 = uinf[104];
T uinf106 = uinf[105];
T uinf107 = uinf[106];
T uinf108 = uinf[107];
T uinf109 = uinf[108];
T uinf110 = uinf[109];
T uinf111 = uinf[110];
T uinf112 = uinf[111];
T uinf113 = uinf[112];
T uinf114 = uinf[113];
T uinf115 = uinf[114];
T uinf116 = uinf[115];
T uinf117 = uinf[116];
T uinf118 = uinf[117];
T uinf119 = uinf[118];
T uinf120 = uinf[119];
T uinf121 = uinf[120];
T uinf122 = uinf[121];
T uinf123 = uinf[122];
T uinf124 = uinf[123];
T uinf125 = uinf[124];
T uinf126 = uinf[125];
T uinf127 = uinf[126];
T uinf128 = uinf[127];
T uinf129 = uinf[128];
T uinf130 = uinf[129];
T uinf131 = uinf[130];
T uinf132 = uinf[131];
T uinf133 = uinf[132];
T uinf134 = uinf[133];
T uinf135 = uinf[134];
T uinf136 = uinf[135];
T uinf137 = uinf[136];
T uinf138 = uinf[137];
T uinf139 = uinf[138];
T uinf140 = uinf[139];
T uinf141 = uinf[140];
T uinf142 = uinf[141];
T uinf143 = uinf[142];
T uinf144 = uinf[143];
T uinf145 = uinf[144];
T uinf146 = uinf[145];
T uinf147 = uinf[146];
T uinf148 = uinf[147];
T xdg1 = xdg[0*ng+i];
T xdg2 = xdg[1*ng+i];
T udg1 = udg[0*ng+i];
T udg2 = udg[1*ng+i];
T udg3 = udg[2*ng+i];
T udg4 = udg[3*ng+i];
T t2 = param6*param6;
T t3 = param12*param12;
T t4 = xdg1*xdg1;
T t5 = xdg2*xdg2;
T t6 = t4+t5;
T t7 = 1.0/pow(t6,3.0/2.0);
T t8 = udg2*xdg1;
T t9 = udg3*xdg2;
T t10 = t8+t9;
T t12 = sqrt(t6);
T t11 = param12-t12+6.5E1;
T t13 = t11*t11;
T t14 = 1.0/param14;
T t15 = 1.0/sqrt(t6);
T t16 = t15*xdg1;
T t17 = acos(t16);
T t18 = param6*time;
T t19 = xdg2*1.0E3;
T t20 = tanh(t19);
T t21 = t20-1.0;
T t22 = t21*(t17-3.141592653589793);
T t23 = t17+t18+t22;
T t24 = cos(t23);
T t25 = param12*1.0E3;
T t26 = t24*t24;
T t27 = -t26+1.0;
T t28 = sqrt(t27);
T t54 = t12*t28*1.0E3;
T t29 = t25-t54;
T t30 = tanh(t29);
T t31 = 1.0/(udg1*udg1);
T t32 = udg2*udg2;
T t33 = t31*t32*(1.0/2.0);
T t34 = udg3*udg3;
T t35 = t31*t34*(1.0/2.0);
T t36 = t33+t35;
T t39 = t36*udg1;
T t37 = -t39+udg4;
T t38 = fabs(t24);
T t40 = 1.0/t37;
T t41 = param1-1.0;
T t42 = 1.0/t41;
T t43 = param5*t3*t15*t40*t42*udg1*(1.0/2.0);
T t44 = sqrt(t43);
T t45 = t38*t44*1.0E3;
T t46 = t45-8.0E3;
T t47 = tanh(t46);
T t48 = 1.0/param5;
T t49 = 1.0/(param12*param12);
T t50 = t24*1.0E3;
T t51 = tanh(t50);
T t52 = t51*(1.0/2.0);
T t53 = t52-1.0/2.0;
T t55 = t30*(1.0/2.0);
T t56 = t55-1.0/2.0;
T t57 = sqrt(3.141592653589793);
T t58 = t38*t44;
T t59 = t58+6.651874000000001E-2;
T t60 = 1.0/t59;
T t61 = t47*2.82494115E-1;
T t62 = t61+2.82494115E-1;
T t63 = t60*t62;
T t64 = t47*(1.0/2.0);
T t65 = t64-1.0/2.0;
T t66 = t38*t44*5.5643831E-1;
T t67 = t66+1.0606963;
T t68 = t38*t44*1.7245609;
T t69 = t38*t38;
T t70 = param5*t3*t15*t40*t42*t69*udg1*(1.0/2.0);
T t71 = t68+t70+1.0619896;
T t72 = 1.0/t71;
T t78 = t65*t67*t72;
T t73 = t63-t78;
T t74 = uinf75*2.0E4;
T t75 = t74+2.0E2;
T t76 = tanh(t75);
T t77 = t30*5.0E31;
T t79 = t6*t37*t41*t48*t49*t73;
T t80 = t28-1.0;
T t91 = param5*t3*t15*t40*t42*t53*t56*t80*udg1;
T t81 = exp(-t91);
T t92 = t6*t37*t41*t48*t49*t81*2.0;
T t82 = t79-t92;
T t83 = t44*t56*t57*t82;
T t84 = t77+t83+5.0E31;
T t85 = t53*t84;
T t86 = t52+1.0/2.0;
T t93 = t6*t37*t41*t44*t48*t49*t57*t73*t86;
T t87 = t85-t93;
T t88 = uinf76*2.0E4;
T t89 = t88+2.0E2;
T t90 = tanh(t89);
T t94 = uinf77*2.0E4;
T t95 = t94+2.0E2;
T t96 = tanh(t95);
T t97 = uinf78*2.0E4;
T t98 = t97+2.0E2;
T t99 = tanh(t98);
T t100 = uinf79*2.0E4;
T t101 = t100+2.0E2;
T t102 = tanh(t101);
T t103 = uinf80*2.0E4;
T t104 = t103+2.0E2;
T t105 = tanh(t104);
T t106 = uinf81*2.0E4;
T t107 = t106+2.0E2;
T t108 = tanh(t107);
T t109 = uinf82*2.0E4;
T t110 = t109+2.0E2;
T t111 = tanh(t110);
T t112 = uinf83*2.0E4;
T t113 = t112+2.0E2;
T t114 = tanh(t113);
T t115 = uinf84*2.0E4;
T t116 = t115+2.0E2;
T t117 = tanh(t116);
T t118 = uinf85*2.0E4;
T t119 = t118+2.0E2;
T t120 = tanh(t119);
T t121 = uinf86*2.0E4;
T t122 = t121+2.0E2;
T t123 = tanh(t122);
T t124 = uinf87*2.0E4;
T t125 = t124+2.0E2;
T t126 = tanh(t125);
T t127 = uinf88*2.0E4;
T t128 = t127+2.0E2;
T t129 = tanh(t128);
T t130 = uinf89*2.0E4;
T t131 = t130+2.0E2;
T t132 = tanh(t131);
T t133 = uinf90*2.0E4;
T t134 = t133+2.0E2;
T t135 = tanh(t134);
T t136 = uinf91*2.0E4;
T t137 = t136+2.0E2;
T t138 = tanh(t137);
T t139 = uinf92*2.0E4;
T t140 = t139+2.0E2;
T t141 = tanh(t140);
T t142 = uinf93*2.0E4;
T t143 = t142+2.0E2;
T t144 = tanh(t143);
T t145 = uinf94*2.0E4;
T t146 = t145+2.0E2;
T t147 = tanh(t146);
T t148 = uinf95*2.0E4;
T t149 = t148+2.0E2;
T t150 = tanh(t149);
T t151 = uinf96*2.0E4;
T t152 = t151+2.0E2;
T t153 = tanh(t152);
T t154 = uinf97*2.0E4;
T t155 = t154+2.0E2;
T t156 = tanh(t155);
T t157 = uinf98*2.0E4;
T t158 = t157+2.0E2;
T t159 = tanh(t158);
T t160 = uinf99*2.0E4;
T t161 = t160+2.0E2;
T t162 = tanh(t161);
T t163 = uinf100*2.0E4;
T t164 = t163+2.0E2;
T t165 = tanh(t164);
T t166 = uinf101*2.0E4;
T t167 = t166+2.0E2;
T t168 = tanh(t167);
T t169 = uinf102*2.0E4;
T t170 = t169+2.0E2;
T t171 = tanh(t170);
T t172 = uinf103*2.0E4;
T t173 = t172+2.0E2;
T t174 = tanh(t173);
T t175 = uinf104*2.0E4;
T t176 = t175+2.0E2;
T t177 = tanh(t176);
T t178 = uinf105*2.0E4;
T t179 = t178+2.0E2;
T t180 = tanh(t179);
T t181 = uinf106*2.0E4;
T t182 = t181+2.0E2;
T t183 = tanh(t182);
T t184 = uinf107*2.0E4;
T t185 = t184+2.0E2;
T t186 = tanh(t185);
T t187 = uinf108*2.0E4;
T t188 = t187+2.0E2;
T t189 = tanh(t188);
T t190 = uinf109*2.0E4;
T t191 = t190+2.0E2;
T t192 = tanh(t191);
T t193 = uinf110*2.0E4;
T t194 = t193+2.0E2;
T t195 = tanh(t194);
T t196 = uinf111*2.0E4;
T t197 = t196+2.0E2;
T t198 = tanh(t197);
f[0*ng+i] = 0.0;
f[1*ng+i] = param6*udg3*2.0+t2*udg1*xdg1-param5*t3*t7*udg1*xdg1;
f[2*ng+i] = param6*udg2*-2.0+t2*udg1*xdg2-param5*t3*t7*udg1*xdg2;
f[3*ng+i] = t2*t10-param5*t3*t7*t10+param15*param16*t14*udg1*((tanh(t13*5.54E-2-4.0E2)*(1.0/2.0)-1.0/2.0)*(t13*5.54E-5-2.0/5.0)+1.0/5.0)*((uinf38*uinf112*exp(t14*t87*uinf38)*(t76*(-2.0/5.0)+(t76*(1.0/2.0)+1.0/2.0)*(uinf75*2.0E1+1.0)+2.0/5.0))/uinf1+(uinf39*uinf113*exp(t14*t87*uinf39)*(t90*(-2.0/5.0)+(t90*(1.0/2.0)+1.0/2.0)*(uinf76*2.0E1+1.0)+2.0/5.0))/uinf2+(uinf40*uinf114*exp(t14*t87*uinf40)*(t96*(-2.0/5.0)+(t96*(1.0/2.0)+1.0/2.0)*(uinf77*2.0E1+1.0)+2.0/5.0))/uinf3+(uinf41*uinf115*exp(t14*t87*uinf41)*(t99*(-2.0/5.0)+(t99*(1.0/2.0)+1.0/2.0)*(uinf78*2.0E1+1.0)+2.0/5.0))/uinf4+(uinf42*uinf116*exp(t14*t87*uinf42)*(t102*(-2.0/5.0)+(t102*(1.0/2.0)+1.0/2.0)*(uinf79*2.0E1+1.0)+2.0/5.0))/uinf5+(uinf43*uinf117*exp(t14*t87*uinf43)*(t105*(-2.0/5.0)+(t105*(1.0/2.0)+1.0/2.0)*(uinf80*2.0E1+1.0)+2.0/5.0))/uinf6+(uinf44*uinf118*exp(t14*t87*uinf44)*(t108*(-2.0/5.0)+(t108*(1.0/2.0)+1.0/2.0)*(uinf81*2.0E1+1.0)+2.0/5.0))/uinf7+(uinf45*uinf119*exp(t14*t87*uinf45)*(t111*(-2.0/5.0)+(t111*(1.0/2.0)+1.0/2.0)*(uinf82*2.0E1+1.0)+2.0/5.0))/uinf8+(uinf46*uinf120*exp(t14*t87*uinf46)*(t114*(-2.0/5.0)+(t114*(1.0/2.0)+1.0/2.0)*(uinf83*2.0E1+1.0)+2.0/5.0))/uinf9+(uinf47*uinf121*exp(t14*t87*uinf47)*(t117*(-2.0/5.0)+(t117*(1.0/2.0)+1.0/2.0)*(uinf84*2.0E1+1.0)+2.0/5.0))/uinf10+(uinf48*uinf122*exp(t14*t87*uinf48)*(t120*(-2.0/5.0)+(t120*(1.0/2.0)+1.0/2.0)*(uinf85*2.0E1+1.0)+2.0/5.0))/uinf11+(uinf49*uinf123*exp(t14*t87*uinf49)*(t123*(-2.0/5.0)+(t123*(1.0/2.0)+1.0/2.0)*(uinf86*2.0E1+1.0)+2.0/5.0))/uinf12+(uinf50*uinf124*exp(t14*t87*uinf50)*(t126*(-2.0/5.0)+(t126*(1.0/2.0)+1.0/2.0)*(uinf87*2.0E1+1.0)+2.0/5.0))/uinf13+(uinf51*uinf125*exp(t14*t87*uinf51)*(t129*(-2.0/5.0)+(t129*(1.0/2.0)+1.0/2.0)*(uinf88*2.0E1+1.0)+2.0/5.0))/uinf14+(uinf52*uinf126*exp(t14*t87*uinf52)*(t132*(-2.0/5.0)+(t132*(1.0/2.0)+1.0/2.0)*(uinf89*2.0E1+1.0)+2.0/5.0))/uinf15+(uinf53*uinf127*exp(t14*t87*uinf53)*(t135*(-2.0/5.0)+(t135*(1.0/2.0)+1.0/2.0)*(uinf90*2.0E1+1.0)+2.0/5.0))/uinf16+(uinf54*uinf128*exp(t14*t87*uinf54)*(t138*(-2.0/5.0)+(t138*(1.0/2.0)+1.0/2.0)*(uinf91*2.0E1+1.0)+2.0/5.0))/uinf17+(uinf55*uinf129*exp(t14*t87*uinf55)*(t141*(-2.0/5.0)+(t141*(1.0/2.0)+1.0/2.0)*(uinf92*2.0E1+1.0)+2.0/5.0))/uinf18+(uinf56*uinf130*exp(t14*t87*uinf56)*(t144*(-2.0/5.0)+(t144*(1.0/2.0)+1.0/2.0)*(uinf93*2.0E1+1.0)+2.0/5.0))/uinf19+(uinf57*uinf131*exp(t14*t87*uinf57)*(t147*(-2.0/5.0)+(t147*(1.0/2.0)+1.0/2.0)*(uinf94*2.0E1+1.0)+2.0/5.0))/uinf20+(uinf58*uinf132*exp(t14*t87*uinf58)*(t150*(-2.0/5.0)+(t150*(1.0/2.0)+1.0/2.0)*(uinf95*2.0E1+1.0)+2.0/5.0))/uinf21+(uinf59*uinf133*exp(t14*t87*uinf59)*(t153*(-2.0/5.0)+(t153*(1.0/2.0)+1.0/2.0)*(uinf96*2.0E1+1.0)+2.0/5.0))/uinf22+(uinf60*uinf134*exp(t14*t87*uinf60)*(t156*(-2.0/5.0)+(t156*(1.0/2.0)+1.0/2.0)*(uinf97*2.0E1+1.0)+2.0/5.0))/uinf23+(uinf61*uinf135*exp(t14*t87*uinf61)*(t159*(-2.0/5.0)+(t159*(1.0/2.0)+1.0/2.0)*(uinf98*2.0E1+1.0)+2.0/5.0))/uinf24+(uinf62*uinf136*exp(t14*t87*uinf62)*(t162*(-2.0/5.0)+(t162*(1.0/2.0)+1.0/2.0)*(uinf99*2.0E1+1.0)+2.0/5.0))/uinf25+(uinf63*uinf137*exp(t14*t87*uinf63)*(t165*(-2.0/5.0)+(t165*(1.0/2.0)+1.0/2.0)*(uinf100*2.0E1+1.0)+2.0/5.0))/uinf26+(uinf64*uinf138*exp(t14*t87*uinf64)*(t168*(-2.0/5.0)+(t168*(1.0/2.0)+1.0/2.0)*(uinf101*2.0E1+1.0)+2.0/5.0))/uinf27+(uinf65*uinf139*exp(t14*t87*uinf65)*(t171*(-2.0/5.0)+(t171*(1.0/2.0)+1.0/2.0)*(uinf102*2.0E1+1.0)+2.0/5.0))/uinf28+(uinf66*uinf140*exp(t14*t87*uinf66)*(t174*(-2.0/5.0)+(t174*(1.0/2.0)+1.0/2.0)*(uinf103*2.0E1+1.0)+2.0/5.0))/uinf29+(uinf67*uinf141*exp(t14*t87*uinf67)*(t177*(-2.0/5.0)+(t177*(1.0/2.0)+1.0/2.0)*(uinf104*2.0E1+1.0)+2.0/5.0))/uinf30+(uinf68*u
inf142*exp(t14*t87*uinf68)*(t180*(-2.0/5.0)+(t180*(1.0/2.0)+1.0/2.0)*(uinf105*2.0E1+1.0)+2.0/5.0))/uinf31+(uinf69*uinf143*exp(t14*t87*uinf69)*(t183*(-2.0/5.0)+(t183*(1.0/2.0)+1.0/2.0)*(uinf106*2.0E1+1.0)+2.0/5.0))/uinf32+(uinf70*uinf144*exp(t14*t87*uinf70)*(t186*(-2.0/5.0)+(t186*(1.0/2.0)+1.0/2.0)*(uinf107*2.0E1+1.0)+2.0/5.0))/uinf33+(uinf71*uinf145*exp(t14*t87*uinf71)*(t189*(-2.0/5.0)+(t189*(1.0/2.0)+1.0/2.0)*(uinf108*2.0E1+1.0)+2.0/5.0))/uinf34+(uinf72*uinf146*exp(t14*t87*uinf72)*(t192*(-2.0/5.0)+(t192*(1.0/2.0)+1.0/2.0)*(uinf109*2.0E1+1.0)+2.0/5.0))/uinf35+(uinf73*uinf147*exp(t14*t87*uinf73)*(t195*(-2.0/5.0)+(t195*(1.0/2.0)+1.0/2.0)*(uinf110*2.0E1+1.0)+2.0/5.0))/uinf36+(uinf74*uinf148*exp(t14*t87*uinf74)*(t198*(-2.0/5.0)+(t198*(1.0/2.0)+1.0/2.0)*(uinf111*2.0E1+1.0)+2.0/5.0))/uinf37);
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuSource(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uinf, T *param, T time, int modelnumber, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
int blockDim = 256;
int gridDim = (ng + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
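	// Note: capping gridDim at 1024 is safe because kernelgpuSource uses a grid-stride
	// loop (i += blockDim.x * gridDim.x), so each thread processes several of the ng
	// points whenever ng exceeds 1024 * 256.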
hipLaunchKernelGGL(( kernelgpuSource), dim3(gridDim), dim3(blockDim), 0, 0, f, xdg, udg, odg, wdg, uinf, param, time, modelnumber, ng, nc, ncu, nd, ncx, nco, ncw);
}
template void gpuSource(double *, double *, double *, double *, double *, double *, double *, double, int, int, int, int, int, int, int, int);
template void gpuSource(float *, float *, float *, float *, float *, float *, float *, float, int, int, int, int, int, int, int, int);
|
9cb67dc4af42ffd78f5bc1d4d486222d6459b590.cu
|
template <typename T> __global__ void kernelgpuSource(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uinf, T *param, T time, int modelnumber, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i<ng) {
T param1 = param[0];
T param5 = param[4];
T param6 = param[5];
T param12 = param[11];
T param14 = param[13];
T param15 = param[14];
T param16 = param[15];
T uinf1 = uinf[0];
T uinf2 = uinf[1];
T uinf3 = uinf[2];
T uinf4 = uinf[3];
T uinf5 = uinf[4];
T uinf6 = uinf[5];
T uinf7 = uinf[6];
T uinf8 = uinf[7];
T uinf9 = uinf[8];
T uinf10 = uinf[9];
T uinf11 = uinf[10];
T uinf12 = uinf[11];
T uinf13 = uinf[12];
T uinf14 = uinf[13];
T uinf15 = uinf[14];
T uinf16 = uinf[15];
T uinf17 = uinf[16];
T uinf18 = uinf[17];
T uinf19 = uinf[18];
T uinf20 = uinf[19];
T uinf21 = uinf[20];
T uinf22 = uinf[21];
T uinf23 = uinf[22];
T uinf24 = uinf[23];
T uinf25 = uinf[24];
T uinf26 = uinf[25];
T uinf27 = uinf[26];
T uinf28 = uinf[27];
T uinf29 = uinf[28];
T uinf30 = uinf[29];
T uinf31 = uinf[30];
T uinf32 = uinf[31];
T uinf33 = uinf[32];
T uinf34 = uinf[33];
T uinf35 = uinf[34];
T uinf36 = uinf[35];
T uinf37 = uinf[36];
T uinf38 = uinf[37];
T uinf39 = uinf[38];
T uinf40 = uinf[39];
T uinf41 = uinf[40];
T uinf42 = uinf[41];
T uinf43 = uinf[42];
T uinf44 = uinf[43];
T uinf45 = uinf[44];
T uinf46 = uinf[45];
T uinf47 = uinf[46];
T uinf48 = uinf[47];
T uinf49 = uinf[48];
T uinf50 = uinf[49];
T uinf51 = uinf[50];
T uinf52 = uinf[51];
T uinf53 = uinf[52];
T uinf54 = uinf[53];
T uinf55 = uinf[54];
T uinf56 = uinf[55];
T uinf57 = uinf[56];
T uinf58 = uinf[57];
T uinf59 = uinf[58];
T uinf60 = uinf[59];
T uinf61 = uinf[60];
T uinf62 = uinf[61];
T uinf63 = uinf[62];
T uinf64 = uinf[63];
T uinf65 = uinf[64];
T uinf66 = uinf[65];
T uinf67 = uinf[66];
T uinf68 = uinf[67];
T uinf69 = uinf[68];
T uinf70 = uinf[69];
T uinf71 = uinf[70];
T uinf72 = uinf[71];
T uinf73 = uinf[72];
T uinf74 = uinf[73];
T uinf75 = uinf[74];
T uinf76 = uinf[75];
T uinf77 = uinf[76];
T uinf78 = uinf[77];
T uinf79 = uinf[78];
T uinf80 = uinf[79];
T uinf81 = uinf[80];
T uinf82 = uinf[81];
T uinf83 = uinf[82];
T uinf84 = uinf[83];
T uinf85 = uinf[84];
T uinf86 = uinf[85];
T uinf87 = uinf[86];
T uinf88 = uinf[87];
T uinf89 = uinf[88];
T uinf90 = uinf[89];
T uinf91 = uinf[90];
T uinf92 = uinf[91];
T uinf93 = uinf[92];
T uinf94 = uinf[93];
T uinf95 = uinf[94];
T uinf96 = uinf[95];
T uinf97 = uinf[96];
T uinf98 = uinf[97];
T uinf99 = uinf[98];
T uinf100 = uinf[99];
T uinf101 = uinf[100];
T uinf102 = uinf[101];
T uinf103 = uinf[102];
T uinf104 = uinf[103];
T uinf105 = uinf[104];
T uinf106 = uinf[105];
T uinf107 = uinf[106];
T uinf108 = uinf[107];
T uinf109 = uinf[108];
T uinf110 = uinf[109];
T uinf111 = uinf[110];
T uinf112 = uinf[111];
T uinf113 = uinf[112];
T uinf114 = uinf[113];
T uinf115 = uinf[114];
T uinf116 = uinf[115];
T uinf117 = uinf[116];
T uinf118 = uinf[117];
T uinf119 = uinf[118];
T uinf120 = uinf[119];
T uinf121 = uinf[120];
T uinf122 = uinf[121];
T uinf123 = uinf[122];
T uinf124 = uinf[123];
T uinf125 = uinf[124];
T uinf126 = uinf[125];
T uinf127 = uinf[126];
T uinf128 = uinf[127];
T uinf129 = uinf[128];
T uinf130 = uinf[129];
T uinf131 = uinf[130];
T uinf132 = uinf[131];
T uinf133 = uinf[132];
T uinf134 = uinf[133];
T uinf135 = uinf[134];
T uinf136 = uinf[135];
T uinf137 = uinf[136];
T uinf138 = uinf[137];
T uinf139 = uinf[138];
T uinf140 = uinf[139];
T uinf141 = uinf[140];
T uinf142 = uinf[141];
T uinf143 = uinf[142];
T uinf144 = uinf[143];
T uinf145 = uinf[144];
T uinf146 = uinf[145];
T uinf147 = uinf[146];
T uinf148 = uinf[147];
T xdg1 = xdg[0*ng+i];
T xdg2 = xdg[1*ng+i];
T udg1 = udg[0*ng+i];
T udg2 = udg[1*ng+i];
T udg3 = udg[2*ng+i];
T udg4 = udg[3*ng+i];
T t2 = param6*param6;
T t3 = param12*param12;
T t4 = xdg1*xdg1;
T t5 = xdg2*xdg2;
T t6 = t4+t5;
T t7 = 1.0/pow(t6,3.0/2.0);
T t8 = udg2*xdg1;
T t9 = udg3*xdg2;
T t10 = t8+t9;
T t12 = sqrt(t6);
T t11 = param12-t12+6.5E1;
T t13 = t11*t11;
T t14 = 1.0/param14;
T t15 = 1.0/sqrt(t6);
T t16 = t15*xdg1;
T t17 = acos(t16);
T t18 = param6*time;
T t19 = xdg2*1.0E3;
T t20 = tanh(t19);
T t21 = t20-1.0;
T t22 = t21*(t17-3.141592653589793);
T t23 = t17+t18+t22;
T t24 = cos(t23);
T t25 = param12*1.0E3;
T t26 = t24*t24;
T t27 = -t26+1.0;
T t28 = sqrt(t27);
T t54 = t12*t28*1.0E3;
T t29 = t25-t54;
T t30 = tanh(t29);
T t31 = 1.0/(udg1*udg1);
T t32 = udg2*udg2;
T t33 = t31*t32*(1.0/2.0);
T t34 = udg3*udg3;
T t35 = t31*t34*(1.0/2.0);
T t36 = t33+t35;
T t39 = t36*udg1;
T t37 = -t39+udg4;
T t38 = fabs(t24);
T t40 = 1.0/t37;
T t41 = param1-1.0;
T t42 = 1.0/t41;
T t43 = param5*t3*t15*t40*t42*udg1*(1.0/2.0);
T t44 = sqrt(t43);
T t45 = t38*t44*1.0E3;
T t46 = t45-8.0E3;
T t47 = tanh(t46);
T t48 = 1.0/param5;
T t49 = 1.0/(param12*param12);
T t50 = t24*1.0E3;
T t51 = tanh(t50);
T t52 = t51*(1.0/2.0);
T t53 = t52-1.0/2.0;
T t55 = t30*(1.0/2.0);
T t56 = t55-1.0/2.0;
T t57 = sqrt(3.141592653589793);
T t58 = t38*t44;
T t59 = t58+6.651874000000001E-2;
T t60 = 1.0/t59;
T t61 = t47*2.82494115E-1;
T t62 = t61+2.82494115E-1;
T t63 = t60*t62;
T t64 = t47*(1.0/2.0);
T t65 = t64-1.0/2.0;
T t66 = t38*t44*5.5643831E-1;
T t67 = t66+1.0606963;
T t68 = t38*t44*1.7245609;
T t69 = t38*t38;
T t70 = param5*t3*t15*t40*t42*t69*udg1*(1.0/2.0);
T t71 = t68+t70+1.0619896;
T t72 = 1.0/t71;
T t78 = t65*t67*t72;
T t73 = t63-t78;
T t74 = uinf75*2.0E4;
T t75 = t74+2.0E2;
T t76 = tanh(t75);
T t77 = t30*5.0E31;
T t79 = t6*t37*t41*t48*t49*t73;
T t80 = t28-1.0;
T t91 = param5*t3*t15*t40*t42*t53*t56*t80*udg1;
T t81 = exp(-t91);
T t92 = t6*t37*t41*t48*t49*t81*2.0;
T t82 = t79-t92;
T t83 = t44*t56*t57*t82;
T t84 = t77+t83+5.0E31;
T t85 = t53*t84;
T t86 = t52+1.0/2.0;
T t93 = t6*t37*t41*t44*t48*t49*t57*t73*t86;
T t87 = t85-t93;
T t88 = uinf76*2.0E4;
T t89 = t88+2.0E2;
T t90 = tanh(t89);
T t94 = uinf77*2.0E4;
T t95 = t94+2.0E2;
T t96 = tanh(t95);
T t97 = uinf78*2.0E4;
T t98 = t97+2.0E2;
T t99 = tanh(t98);
T t100 = uinf79*2.0E4;
T t101 = t100+2.0E2;
T t102 = tanh(t101);
T t103 = uinf80*2.0E4;
T t104 = t103+2.0E2;
T t105 = tanh(t104);
T t106 = uinf81*2.0E4;
T t107 = t106+2.0E2;
T t108 = tanh(t107);
T t109 = uinf82*2.0E4;
T t110 = t109+2.0E2;
T t111 = tanh(t110);
T t112 = uinf83*2.0E4;
T t113 = t112+2.0E2;
T t114 = tanh(t113);
T t115 = uinf84*2.0E4;
T t116 = t115+2.0E2;
T t117 = tanh(t116);
T t118 = uinf85*2.0E4;
T t119 = t118+2.0E2;
T t120 = tanh(t119);
T t121 = uinf86*2.0E4;
T t122 = t121+2.0E2;
T t123 = tanh(t122);
T t124 = uinf87*2.0E4;
T t125 = t124+2.0E2;
T t126 = tanh(t125);
T t127 = uinf88*2.0E4;
T t128 = t127+2.0E2;
T t129 = tanh(t128);
T t130 = uinf89*2.0E4;
T t131 = t130+2.0E2;
T t132 = tanh(t131);
T t133 = uinf90*2.0E4;
T t134 = t133+2.0E2;
T t135 = tanh(t134);
T t136 = uinf91*2.0E4;
T t137 = t136+2.0E2;
T t138 = tanh(t137);
T t139 = uinf92*2.0E4;
T t140 = t139+2.0E2;
T t141 = tanh(t140);
T t142 = uinf93*2.0E4;
T t143 = t142+2.0E2;
T t144 = tanh(t143);
T t145 = uinf94*2.0E4;
T t146 = t145+2.0E2;
T t147 = tanh(t146);
T t148 = uinf95*2.0E4;
T t149 = t148+2.0E2;
T t150 = tanh(t149);
T t151 = uinf96*2.0E4;
T t152 = t151+2.0E2;
T t153 = tanh(t152);
T t154 = uinf97*2.0E4;
T t155 = t154+2.0E2;
T t156 = tanh(t155);
T t157 = uinf98*2.0E4;
T t158 = t157+2.0E2;
T t159 = tanh(t158);
T t160 = uinf99*2.0E4;
T t161 = t160+2.0E2;
T t162 = tanh(t161);
T t163 = uinf100*2.0E4;
T t164 = t163+2.0E2;
T t165 = tanh(t164);
T t166 = uinf101*2.0E4;
T t167 = t166+2.0E2;
T t168 = tanh(t167);
T t169 = uinf102*2.0E4;
T t170 = t169+2.0E2;
T t171 = tanh(t170);
T t172 = uinf103*2.0E4;
T t173 = t172+2.0E2;
T t174 = tanh(t173);
T t175 = uinf104*2.0E4;
T t176 = t175+2.0E2;
T t177 = tanh(t176);
T t178 = uinf105*2.0E4;
T t179 = t178+2.0E2;
T t180 = tanh(t179);
T t181 = uinf106*2.0E4;
T t182 = t181+2.0E2;
T t183 = tanh(t182);
T t184 = uinf107*2.0E4;
T t185 = t184+2.0E2;
T t186 = tanh(t185);
T t187 = uinf108*2.0E4;
T t188 = t187+2.0E2;
T t189 = tanh(t188);
T t190 = uinf109*2.0E4;
T t191 = t190+2.0E2;
T t192 = tanh(t191);
T t193 = uinf110*2.0E4;
T t194 = t193+2.0E2;
T t195 = tanh(t194);
T t196 = uinf111*2.0E4;
T t197 = t196+2.0E2;
T t198 = tanh(t197);
f[0*ng+i] = 0.0;
f[1*ng+i] = param6*udg3*2.0+t2*udg1*xdg1-param5*t3*t7*udg1*xdg1;
f[2*ng+i] = param6*udg2*-2.0+t2*udg1*xdg2-param5*t3*t7*udg1*xdg2;
f[3*ng+i] = t2*t10-param5*t3*t7*t10+param15*param16*t14*udg1*((tanh(t13*5.54E-2-4.0E2)*(1.0/2.0)-1.0/2.0)*(t13*5.54E-5-2.0/5.0)+1.0/5.0)*((uinf38*uinf112*exp(t14*t87*uinf38)*(t76*(-2.0/5.0)+(t76*(1.0/2.0)+1.0/2.0)*(uinf75*2.0E1+1.0)+2.0/5.0))/uinf1+(uinf39*uinf113*exp(t14*t87*uinf39)*(t90*(-2.0/5.0)+(t90*(1.0/2.0)+1.0/2.0)*(uinf76*2.0E1+1.0)+2.0/5.0))/uinf2+(uinf40*uinf114*exp(t14*t87*uinf40)*(t96*(-2.0/5.0)+(t96*(1.0/2.0)+1.0/2.0)*(uinf77*2.0E1+1.0)+2.0/5.0))/uinf3+(uinf41*uinf115*exp(t14*t87*uinf41)*(t99*(-2.0/5.0)+(t99*(1.0/2.0)+1.0/2.0)*(uinf78*2.0E1+1.0)+2.0/5.0))/uinf4+(uinf42*uinf116*exp(t14*t87*uinf42)*(t102*(-2.0/5.0)+(t102*(1.0/2.0)+1.0/2.0)*(uinf79*2.0E1+1.0)+2.0/5.0))/uinf5+(uinf43*uinf117*exp(t14*t87*uinf43)*(t105*(-2.0/5.0)+(t105*(1.0/2.0)+1.0/2.0)*(uinf80*2.0E1+1.0)+2.0/5.0))/uinf6+(uinf44*uinf118*exp(t14*t87*uinf44)*(t108*(-2.0/5.0)+(t108*(1.0/2.0)+1.0/2.0)*(uinf81*2.0E1+1.0)+2.0/5.0))/uinf7+(uinf45*uinf119*exp(t14*t87*uinf45)*(t111*(-2.0/5.0)+(t111*(1.0/2.0)+1.0/2.0)*(uinf82*2.0E1+1.0)+2.0/5.0))/uinf8+(uinf46*uinf120*exp(t14*t87*uinf46)*(t114*(-2.0/5.0)+(t114*(1.0/2.0)+1.0/2.0)*(uinf83*2.0E1+1.0)+2.0/5.0))/uinf9+(uinf47*uinf121*exp(t14*t87*uinf47)*(t117*(-2.0/5.0)+(t117*(1.0/2.0)+1.0/2.0)*(uinf84*2.0E1+1.0)+2.0/5.0))/uinf10+(uinf48*uinf122*exp(t14*t87*uinf48)*(t120*(-2.0/5.0)+(t120*(1.0/2.0)+1.0/2.0)*(uinf85*2.0E1+1.0)+2.0/5.0))/uinf11+(uinf49*uinf123*exp(t14*t87*uinf49)*(t123*(-2.0/5.0)+(t123*(1.0/2.0)+1.0/2.0)*(uinf86*2.0E1+1.0)+2.0/5.0))/uinf12+(uinf50*uinf124*exp(t14*t87*uinf50)*(t126*(-2.0/5.0)+(t126*(1.0/2.0)+1.0/2.0)*(uinf87*2.0E1+1.0)+2.0/5.0))/uinf13+(uinf51*uinf125*exp(t14*t87*uinf51)*(t129*(-2.0/5.0)+(t129*(1.0/2.0)+1.0/2.0)*(uinf88*2.0E1+1.0)+2.0/5.0))/uinf14+(uinf52*uinf126*exp(t14*t87*uinf52)*(t132*(-2.0/5.0)+(t132*(1.0/2.0)+1.0/2.0)*(uinf89*2.0E1+1.0)+2.0/5.0))/uinf15+(uinf53*uinf127*exp(t14*t87*uinf53)*(t135*(-2.0/5.0)+(t135*(1.0/2.0)+1.0/2.0)*(uinf90*2.0E1+1.0)+2.0/5.0))/uinf16+(uinf54*uinf128*exp(t14*t87*uinf54)*(t138*(-2.0/5.0)+(t138*(1.0/2.0)+1.0/2.0)*(uinf91*2.0E1+1.0)+2.0/5.0))/uinf17+(uinf55*uinf129*exp(t14*t87*uinf55)*(t141*(-2.0/5.0)+(t141*(1.0/2.0)+1.0/2.0)*(uinf92*2.0E1+1.0)+2.0/5.0))/uinf18+(uinf56*uinf130*exp(t14*t87*uinf56)*(t144*(-2.0/5.0)+(t144*(1.0/2.0)+1.0/2.0)*(uinf93*2.0E1+1.0)+2.0/5.0))/uinf19+(uinf57*uinf131*exp(t14*t87*uinf57)*(t147*(-2.0/5.0)+(t147*(1.0/2.0)+1.0/2.0)*(uinf94*2.0E1+1.0)+2.0/5.0))/uinf20+(uinf58*uinf132*exp(t14*t87*uinf58)*(t150*(-2.0/5.0)+(t150*(1.0/2.0)+1.0/2.0)*(uinf95*2.0E1+1.0)+2.0/5.0))/uinf21+(uinf59*uinf133*exp(t14*t87*uinf59)*(t153*(-2.0/5.0)+(t153*(1.0/2.0)+1.0/2.0)*(uinf96*2.0E1+1.0)+2.0/5.0))/uinf22+(uinf60*uinf134*exp(t14*t87*uinf60)*(t156*(-2.0/5.0)+(t156*(1.0/2.0)+1.0/2.0)*(uinf97*2.0E1+1.0)+2.0/5.0))/uinf23+(uinf61*uinf135*exp(t14*t87*uinf61)*(t159*(-2.0/5.0)+(t159*(1.0/2.0)+1.0/2.0)*(uinf98*2.0E1+1.0)+2.0/5.0))/uinf24+(uinf62*uinf136*exp(t14*t87*uinf62)*(t162*(-2.0/5.0)+(t162*(1.0/2.0)+1.0/2.0)*(uinf99*2.0E1+1.0)+2.0/5.0))/uinf25+(uinf63*uinf137*exp(t14*t87*uinf63)*(t165*(-2.0/5.0)+(t165*(1.0/2.0)+1.0/2.0)*(uinf100*2.0E1+1.0)+2.0/5.0))/uinf26+(uinf64*uinf138*exp(t14*t87*uinf64)*(t168*(-2.0/5.0)+(t168*(1.0/2.0)+1.0/2.0)*(uinf101*2.0E1+1.0)+2.0/5.0))/uinf27+(uinf65*uinf139*exp(t14*t87*uinf65)*(t171*(-2.0/5.0)+(t171*(1.0/2.0)+1.0/2.0)*(uinf102*2.0E1+1.0)+2.0/5.0))/uinf28+(uinf66*uinf140*exp(t14*t87*uinf66)*(t174*(-2.0/5.0)+(t174*(1.0/2.0)+1.0/2.0)*(uinf103*2.0E1+1.0)+2.0/5.0))/uinf29+(uinf67*uinf141*exp(t14*t87*uinf67)*(t177*(-2.0/5.0)+(t177*(1.0/2.0)+1.0/2.0)*(uinf104*2.0E1+1.0)+2.0/5.0))/uinf30+(uinf68*u
inf142*exp(t14*t87*uinf68)*(t180*(-2.0/5.0)+(t180*(1.0/2.0)+1.0/2.0)*(uinf105*2.0E1+1.0)+2.0/5.0))/uinf31+(uinf69*uinf143*exp(t14*t87*uinf69)*(t183*(-2.0/5.0)+(t183*(1.0/2.0)+1.0/2.0)*(uinf106*2.0E1+1.0)+2.0/5.0))/uinf32+(uinf70*uinf144*exp(t14*t87*uinf70)*(t186*(-2.0/5.0)+(t186*(1.0/2.0)+1.0/2.0)*(uinf107*2.0E1+1.0)+2.0/5.0))/uinf33+(uinf71*uinf145*exp(t14*t87*uinf71)*(t189*(-2.0/5.0)+(t189*(1.0/2.0)+1.0/2.0)*(uinf108*2.0E1+1.0)+2.0/5.0))/uinf34+(uinf72*uinf146*exp(t14*t87*uinf72)*(t192*(-2.0/5.0)+(t192*(1.0/2.0)+1.0/2.0)*(uinf109*2.0E1+1.0)+2.0/5.0))/uinf35+(uinf73*uinf147*exp(t14*t87*uinf73)*(t195*(-2.0/5.0)+(t195*(1.0/2.0)+1.0/2.0)*(uinf110*2.0E1+1.0)+2.0/5.0))/uinf36+(uinf74*uinf148*exp(t14*t87*uinf74)*(t198*(-2.0/5.0)+(t198*(1.0/2.0)+1.0/2.0)*(uinf111*2.0E1+1.0)+2.0/5.0))/uinf37);
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuSource(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uinf, T *param, T time, int modelnumber, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
int blockDim = 256;
int gridDim = (ng + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
kernelgpuSource<<<gridDim, blockDim>>>(f, xdg, udg, odg, wdg, uinf, param, time, modelnumber, ng, nc, ncu, nd, ncx, nco, ncw);
}
template void gpuSource(double *, double *, double *, double *, double *, double *, double *, double, int, int, int, int, int, int, int, int);
template void gpuSource(float *, float *, float *, float *, float *, float *, float *, float, int, int, int, int, int, int, int, int);
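// -------------------------------------------------------------------------
// Editor's note (not part of the generated source): gpuSource fixes the block
// size at 256 threads and caps the grid at 1024 blocks; full coverage of the
// ng points is then provided by the grid-stride while loop inside
// kernelgpuSource (i += blockDim.x * gridDim.x). Below is a minimal standalone
// sketch of the same launch pattern; gridStrideScale and its arguments are
// hypothetical placeholders, only the loop/launch shape mirrors the code above.
template <typename T>
__global__ void gridStrideScale(T *y, const T *x, T a, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {                      // same loop shape as kernelgpuSource
        y[i] = a * x[i];                 // per-point work goes here
        i += blockDim.x * gridDim.x;     // advance by the total thread count
    }
}
template <typename T>
void launchGridStrideScale(T *y, const T *x, T a, int n)
{
    int blockDim = 256;
    int gridDim = (n + blockDim - 1) / blockDim;
    gridDim = (gridDim > 1024) ? 1024 : gridDim;  // cap; the stride loop covers the rest
    gridStrideScale<<<gridDim, blockDim>>>(y, x, a, n);
}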
|
8b2afdea79b4cf9018a2ca0cfea37d75878df3c2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020 Zhixu Zhao
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "tl_tensor_internal_cuda.h"
template <typename Ts, typename Td>
static __global__ void submean_kernel(const Ts *src, Td *dst, tl_dtype dst_dtype, double mean1,
double mean2, double mean3, int H, int W, int C,
int block_size, int total)
{
int di = blockIdx.x * block_size + threadIdx.x;
if (di >= total)
return;
double mean[] = { mean1, mean2, mean3 };
int src_coords[TL_MAXDIM];
int dst_coords[TL_MAXDIM];
int src_dims[] = { H, W, C };
int dst_dims[] = { C, H, W };
tl_get_coords_cu(di, dst_coords, 3, dst_dims);
src_coords[0] = dst_coords[1];
src_coords[1] = dst_coords[2];
src_coords[2] = dst_coords[0];
int si = tl_get_index_cu(src_coords, 3, src_dims);
double res = (double)src[si] - mean[src_coords[2]];
tl_convert_device_cu(&dst[di], dst_dtype, &res, TL_DOUBLE);
}
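/* Editor's sketch (assumes tl_get_coords_cu/tl_get_index_cu decompose indices in
 * row-major order, which matches the usage above): for H = 2, W = 2, C = 3 the
 * destination index di in C*H*W order maps back to the interleaved H*W*C source as
 *   di = 0 -> dst_coords {0,0,0} -> src_coords {0,0,0} -> si = 0  (channel 0)
 *   di = 4 -> dst_coords {1,0,0} -> src_coords {0,0,1} -> si = 1  (channel 1)
 *   di = 8 -> dst_coords {2,0,0} -> src_coords {0,0,2} -> si = 2  (channel 2)
 * so each destination channel plane gathers one interleaved source channel, and
 * mean[src_coords[2]] subtracts that channel's mean before the dtype conversion. */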
/* src: H*W*C, dst: C*H*W */
TL_EXPORT tl_tensor *tl_tensor_submean_cuda(const tl_tensor *src, tl_tensor *dst,
const double *mean)
{
assert(src);
assert(tl_is_device_mem(src->data));
assert(mean);
assert(src->ndim == 3);
int new_dims[] = { src->dims[2], src->dims[0], src->dims[1] };
if (dst) {
assert(tl_is_device_mem(dst->data));
assert(dst->ndim == src->ndim);
assert(dst->dims[0] == 3);
} else {
dst = tl_tensor_zeros_cuda(src->ndim, new_dims, TL_FLOAT);
}
int thread_num, block_num;
thread_num = dst->len;
block_num = thread_num / BLOCK_SIZE + 1;
/*
* Generated by tools/generic.pl with
* $switchtype(src->dtype, T1)
* $switchtype(dst->dtype, T2)
* $typenoset(T1, TL_BOOL)
* $typenoset(T2, TL_BOOL)
* submean_kernel<T1, T2><<<block_num, BLOCK_SIZE>>>((T1 *)src->data, (T2 *)dst->data, dst->dtype, mean[0], mean[1], mean[2], src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
*/
switch (src->dtype) {
case TL_DOUBLE:
switch (dst->dtype) {
case TL_DOUBLE:
hipLaunchKernelGGL(( submean_kernel<double, double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(double *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
hipLaunchKernelGGL(( submean_kernel<double, float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(double *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
hipLaunchKernelGGL(( submean_kernel<double, int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(double *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
hipLaunchKernelGGL(( submean_kernel<double, int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(double *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
hipLaunchKernelGGL(( submean_kernel<double, int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(double *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
hipLaunchKernelGGL(( submean_kernel<double, uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(double *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
hipLaunchKernelGGL(( submean_kernel<double, uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(double *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
hipLaunchKernelGGL(( submean_kernel<double, uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(double *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_FLOAT:
switch (dst->dtype) {
case TL_DOUBLE:
hipLaunchKernelGGL(( submean_kernel<float, double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(float *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
hipLaunchKernelGGL(( submean_kernel<float, float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(float *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
hipLaunchKernelGGL(( submean_kernel<float, int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(float *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
hipLaunchKernelGGL(( submean_kernel<float, int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(float *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
hipLaunchKernelGGL(( submean_kernel<float, int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(float *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
hipLaunchKernelGGL(( submean_kernel<float, uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(float *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
hipLaunchKernelGGL(( submean_kernel<float, uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(float *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
hipLaunchKernelGGL(( submean_kernel<float, uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(float *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_INT16:
switch (dst->dtype) {
case TL_DOUBLE:
hipLaunchKernelGGL(( submean_kernel<int16_t, double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int16_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
hipLaunchKernelGGL(( submean_kernel<int16_t, float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int16_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
hipLaunchKernelGGL(( submean_kernel<int16_t, int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int16_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
hipLaunchKernelGGL(( submean_kernel<int16_t, int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int16_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
hipLaunchKernelGGL(( submean_kernel<int16_t, int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int16_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
hipLaunchKernelGGL(( submean_kernel<int16_t, uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int16_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
hipLaunchKernelGGL(( submean_kernel<int16_t, uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int16_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
hipLaunchKernelGGL(( submean_kernel<int16_t, uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int16_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_INT32:
switch (dst->dtype) {
case TL_DOUBLE:
hipLaunchKernelGGL(( submean_kernel<int32_t, double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int32_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
hipLaunchKernelGGL(( submean_kernel<int32_t, float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int32_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
hipLaunchKernelGGL(( submean_kernel<int32_t, int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int32_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
hipLaunchKernelGGL(( submean_kernel<int32_t, int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int32_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
hipLaunchKernelGGL(( submean_kernel<int32_t, int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int32_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
hipLaunchKernelGGL(( submean_kernel<int32_t, uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int32_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
hipLaunchKernelGGL(( submean_kernel<int32_t, uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int32_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
hipLaunchKernelGGL(( submean_kernel<int32_t, uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int32_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_INT8:
switch (dst->dtype) {
case TL_DOUBLE:
hipLaunchKernelGGL(( submean_kernel<int8_t, double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int8_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
hipLaunchKernelGGL(( submean_kernel<int8_t, float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int8_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
hipLaunchKernelGGL(( submean_kernel<int8_t, int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int8_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
hipLaunchKernelGGL(( submean_kernel<int8_t, int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int8_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
hipLaunchKernelGGL(( submean_kernel<int8_t, int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int8_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
hipLaunchKernelGGL(( submean_kernel<int8_t, uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int8_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
hipLaunchKernelGGL(( submean_kernel<int8_t, uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int8_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
hipLaunchKernelGGL(( submean_kernel<int8_t, uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(int8_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_UINT16:
switch (dst->dtype) {
case TL_DOUBLE:
hipLaunchKernelGGL(( submean_kernel<uint16_t, double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint16_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
hipLaunchKernelGGL(( submean_kernel<uint16_t, float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint16_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
hipLaunchKernelGGL(( submean_kernel<uint16_t, int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint16_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
hipLaunchKernelGGL(( submean_kernel<uint16_t, int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint16_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
hipLaunchKernelGGL(( submean_kernel<uint16_t, int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint16_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
hipLaunchKernelGGL(( submean_kernel<uint16_t, uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint16_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
hipLaunchKernelGGL(( submean_kernel<uint16_t, uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint16_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
hipLaunchKernelGGL(( submean_kernel<uint16_t, uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint16_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_UINT32:
switch (dst->dtype) {
case TL_DOUBLE:
hipLaunchKernelGGL(( submean_kernel<uint32_t, double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint32_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
hipLaunchKernelGGL(( submean_kernel<uint32_t, float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint32_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
hipLaunchKernelGGL(( submean_kernel<uint32_t, int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint32_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
hipLaunchKernelGGL(( submean_kernel<uint32_t, int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint32_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
hipLaunchKernelGGL(( submean_kernel<uint32_t, int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint32_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
hipLaunchKernelGGL(( submean_kernel<uint32_t, uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint32_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
hipLaunchKernelGGL(( submean_kernel<uint32_t, uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint32_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
hipLaunchKernelGGL(( submean_kernel<uint32_t, uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint32_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_UINT8:
switch (dst->dtype) {
case TL_DOUBLE:
hipLaunchKernelGGL(( submean_kernel<uint8_t, double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint8_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
hipLaunchKernelGGL(( submean_kernel<uint8_t, float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint8_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
hipLaunchKernelGGL(( submean_kernel<uint8_t, int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint8_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
hipLaunchKernelGGL(( submean_kernel<uint8_t, int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint8_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
hipLaunchKernelGGL(( submean_kernel<uint8_t, int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint8_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
hipLaunchKernelGGL(( submean_kernel<uint8_t, uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint8_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
hipLaunchKernelGGL(( submean_kernel<uint8_t, uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint8_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
hipLaunchKernelGGL(( submean_kernel<uint8_t, uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0,
(uint8_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
default:
assert(0 && "unsupported dtype for src->dtype");
break;
}
tl_cuda_device_sync();
return dst;
}
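/* Editor's note (a sketch, not from the original source): the launch configuration
 * above always rounds the block count up with "thread_num / BLOCK_SIZE + 1" and lets
 * the "di >= total" guard in submean_kernel discard the overshoot. An equivalent
 * ceil-division form avoids launching a wholly idle block when dst->len happens to
 * be an exact multiple of BLOCK_SIZE:
 *
 *     int thread_num = dst->len;
 *     int block_num = (thread_num + BLOCK_SIZE - 1) / BLOCK_SIZE;
 */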
|
8b2afdea79b4cf9018a2ca0cfea37d75878df3c2.cu
|
/*
* Copyright (c) 2018-2020 Zhixu Zhao
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "tl_tensor_internal_cuda.h"
template <typename Ts, typename Td>
static __global__ void submean_kernel(const Ts *src, Td *dst, tl_dtype dst_dtype, double mean1,
double mean2, double mean3, int H, int W, int C,
int block_size, int total)
{
int di = blockIdx.x * block_size + threadIdx.x;
if (di >= total)
return;
double mean[] = { mean1, mean2, mean3 };
int src_coords[TL_MAXDIM];
int dst_coords[TL_MAXDIM];
int src_dims[] = { H, W, C };
int dst_dims[] = { C, H, W };
tl_get_coords_cu(di, dst_coords, 3, dst_dims);
src_coords[0] = dst_coords[1];
src_coords[1] = dst_coords[2];
src_coords[2] = dst_coords[0];
int si = tl_get_index_cu(src_coords, 3, src_dims);
double res = (double)src[si] - mean[src_coords[2]];
tl_convert_device_cu(&dst[di], dst_dtype, &res, TL_DOUBLE);
}
/* src: H*W*C, dst: C*H*W */
TL_EXPORT tl_tensor *tl_tensor_submean_cuda(const tl_tensor *src, tl_tensor *dst,
const double *mean)
{
assert(src);
assert(tl_is_device_mem(src->data));
assert(mean);
assert(src->ndim == 3);
int new_dims[] = { src->dims[2], src->dims[0], src->dims[1] };
if (dst) {
assert(tl_is_device_mem(dst->data));
assert(dst->ndim == src->ndim);
assert(dst->dims[0] == 3);
} else {
dst = tl_tensor_zeros_cuda(src->ndim, new_dims, TL_FLOAT);
}
int thread_num, block_num;
thread_num = dst->len;
block_num = thread_num / BLOCK_SIZE + 1;
/*
* Generated by tools/generic.pl with
* $switchtype(src->dtype, T1)
* $switchtype(dst->dtype, T2)
* $typenoset(T1, TL_BOOL)
* $typenoset(T2, TL_BOOL)
* submean_kernel<T1, T2><<<block_num, BLOCK_SIZE>>>((T1 *)src->data, (T2 *)dst->data, dst->dtype, mean[0], mean[1], mean[2], src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
*/
switch (src->dtype) {
case TL_DOUBLE:
switch (dst->dtype) {
case TL_DOUBLE:
submean_kernel<double, double><<<block_num, BLOCK_SIZE>>>(
(double *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
submean_kernel<double, float><<<block_num, BLOCK_SIZE>>>(
(double *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
submean_kernel<double, int16_t><<<block_num, BLOCK_SIZE>>>(
(double *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
submean_kernel<double, int32_t><<<block_num, BLOCK_SIZE>>>(
(double *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
submean_kernel<double, int8_t><<<block_num, BLOCK_SIZE>>>(
(double *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
submean_kernel<double, uint16_t><<<block_num, BLOCK_SIZE>>>(
(double *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
submean_kernel<double, uint32_t><<<block_num, BLOCK_SIZE>>>(
(double *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
submean_kernel<double, uint8_t><<<block_num, BLOCK_SIZE>>>(
(double *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_FLOAT:
switch (dst->dtype) {
case TL_DOUBLE:
submean_kernel<float, double><<<block_num, BLOCK_SIZE>>>(
(float *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
submean_kernel<float, float><<<block_num, BLOCK_SIZE>>>(
(float *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
submean_kernel<float, int16_t><<<block_num, BLOCK_SIZE>>>(
(float *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
submean_kernel<float, int32_t><<<block_num, BLOCK_SIZE>>>(
(float *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
submean_kernel<float, int8_t><<<block_num, BLOCK_SIZE>>>(
(float *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
submean_kernel<float, uint16_t><<<block_num, BLOCK_SIZE>>>(
(float *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
submean_kernel<float, uint32_t><<<block_num, BLOCK_SIZE>>>(
(float *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
submean_kernel<float, uint8_t><<<block_num, BLOCK_SIZE>>>(
(float *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_INT16:
switch (dst->dtype) {
case TL_DOUBLE:
submean_kernel<int16_t, double><<<block_num, BLOCK_SIZE>>>(
(int16_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
submean_kernel<int16_t, float><<<block_num, BLOCK_SIZE>>>(
(int16_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
submean_kernel<int16_t, int16_t><<<block_num, BLOCK_SIZE>>>(
(int16_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
submean_kernel<int16_t, int32_t><<<block_num, BLOCK_SIZE>>>(
(int16_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
submean_kernel<int16_t, int8_t><<<block_num, BLOCK_SIZE>>>(
(int16_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
submean_kernel<int16_t, uint16_t><<<block_num, BLOCK_SIZE>>>(
(int16_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
submean_kernel<int16_t, uint32_t><<<block_num, BLOCK_SIZE>>>(
(int16_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
submean_kernel<int16_t, uint8_t><<<block_num, BLOCK_SIZE>>>(
(int16_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_INT32:
switch (dst->dtype) {
case TL_DOUBLE:
submean_kernel<int32_t, double><<<block_num, BLOCK_SIZE>>>(
(int32_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
submean_kernel<int32_t, float><<<block_num, BLOCK_SIZE>>>(
(int32_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
submean_kernel<int32_t, int16_t><<<block_num, BLOCK_SIZE>>>(
(int32_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
submean_kernel<int32_t, int32_t><<<block_num, BLOCK_SIZE>>>(
(int32_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
submean_kernel<int32_t, int8_t><<<block_num, BLOCK_SIZE>>>(
(int32_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
submean_kernel<int32_t, uint16_t><<<block_num, BLOCK_SIZE>>>(
(int32_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
submean_kernel<int32_t, uint32_t><<<block_num, BLOCK_SIZE>>>(
(int32_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
submean_kernel<int32_t, uint8_t><<<block_num, BLOCK_SIZE>>>(
(int32_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_INT8:
switch (dst->dtype) {
case TL_DOUBLE:
submean_kernel<int8_t, double><<<block_num, BLOCK_SIZE>>>(
(int8_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
submean_kernel<int8_t, float><<<block_num, BLOCK_SIZE>>>(
(int8_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
submean_kernel<int8_t, int16_t><<<block_num, BLOCK_SIZE>>>(
(int8_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
submean_kernel<int8_t, int32_t><<<block_num, BLOCK_SIZE>>>(
(int8_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
submean_kernel<int8_t, int8_t><<<block_num, BLOCK_SIZE>>>(
(int8_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
submean_kernel<int8_t, uint16_t><<<block_num, BLOCK_SIZE>>>(
(int8_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
submean_kernel<int8_t, uint32_t><<<block_num, BLOCK_SIZE>>>(
(int8_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
submean_kernel<int8_t, uint8_t><<<block_num, BLOCK_SIZE>>>(
(int8_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_UINT16:
switch (dst->dtype) {
case TL_DOUBLE:
submean_kernel<uint16_t, double><<<block_num, BLOCK_SIZE>>>(
(uint16_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
submean_kernel<uint16_t, float><<<block_num, BLOCK_SIZE>>>(
(uint16_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
submean_kernel<uint16_t, int16_t><<<block_num, BLOCK_SIZE>>>(
(uint16_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
submean_kernel<uint16_t, int32_t><<<block_num, BLOCK_SIZE>>>(
(uint16_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
submean_kernel<uint16_t, int8_t><<<block_num, BLOCK_SIZE>>>(
(uint16_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
submean_kernel<uint16_t, uint16_t><<<block_num, BLOCK_SIZE>>>(
(uint16_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
submean_kernel<uint16_t, uint32_t><<<block_num, BLOCK_SIZE>>>(
(uint16_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
submean_kernel<uint16_t, uint8_t><<<block_num, BLOCK_SIZE>>>(
(uint16_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_UINT32:
switch (dst->dtype) {
case TL_DOUBLE:
submean_kernel<uint32_t, double><<<block_num, BLOCK_SIZE>>>(
(uint32_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
submean_kernel<uint32_t, float><<<block_num, BLOCK_SIZE>>>(
(uint32_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
submean_kernel<uint32_t, int16_t><<<block_num, BLOCK_SIZE>>>(
(uint32_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
submean_kernel<uint32_t, int32_t><<<block_num, BLOCK_SIZE>>>(
(uint32_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
submean_kernel<uint32_t, int8_t><<<block_num, BLOCK_SIZE>>>(
(uint32_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
submean_kernel<uint32_t, uint16_t><<<block_num, BLOCK_SIZE>>>(
(uint32_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
submean_kernel<uint32_t, uint32_t><<<block_num, BLOCK_SIZE>>>(
(uint32_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
submean_kernel<uint32_t, uint8_t><<<block_num, BLOCK_SIZE>>>(
(uint32_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
case TL_UINT8:
switch (dst->dtype) {
case TL_DOUBLE:
submean_kernel<uint8_t, double><<<block_num, BLOCK_SIZE>>>(
(uint8_t *)src->data, (double *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
submean_kernel<uint8_t, float><<<block_num, BLOCK_SIZE>>>(
(uint8_t *)src->data, (float *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT16:
submean_kernel<uint8_t, int16_t><<<block_num, BLOCK_SIZE>>>(
(uint8_t *)src->data, (int16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT32:
submean_kernel<uint8_t, int32_t><<<block_num, BLOCK_SIZE>>>(
(uint8_t *)src->data, (int32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_INT8:
submean_kernel<uint8_t, int8_t><<<block_num, BLOCK_SIZE>>>(
(uint8_t *)src->data, (int8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
submean_kernel<uint8_t, uint16_t><<<block_num, BLOCK_SIZE>>>(
(uint8_t *)src->data, (uint16_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
submean_kernel<uint8_t, uint32_t><<<block_num, BLOCK_SIZE>>>(
(uint8_t *)src->data, (uint32_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
case TL_UINT8:
submean_kernel<uint8_t, uint8_t><<<block_num, BLOCK_SIZE>>>(
(uint8_t *)src->data, (uint8_t *)dst->data, dst->dtype, mean[0], mean[1], mean[2],
src->dims[0], src->dims[1], src->dims[2], BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported dtype for dst->dtype");
break;
}
break;
default:
assert(0 && "unsupported dtype for src->dtype");
break;
}
tl_cuda_device_sync();
return dst;
}
|
ba0ea00121abf5dc84d0f32f5e02f2419cb317ac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "timer.h"
#include "utils.h"
__global__ void gaussian_blur(int* inputChannel,
int* outputChannel,
int numRows, int numCols,
int* filter, const int filterWidth,const int s, int oRows, int oCols)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
int x=blockIdx.x * blockDim.x + threadIdx.x;
int y=blockIdx.y * blockDim.y + threadIdx.y;
const int thread_1D_pos = y * oCols + x;
if ( x >=oCols ||y >= oRows )
{
return;
}
int sum=0;
//printf("gloc=%d =>threadId.x=%d,threadId.y=%d,blockId.x=%d,blocId.y=%d,position.x=%d,position.y=%d\n",thread_1D_pos,threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,threadIdx.x +(blockIdx.x* blockDim.x),threadIdx.y +(blockIdx.y* blockDim.y));
int kidx=0;
for(int r=0; r<filterWidth;++r){
for(int c=0; c<filterWidth;++c){
int idx=(y*s+r)*numCols+x*s+c;
int filter_value=filter[kidx++];
sum+=filter_value*inputChannel[idx];
}
}
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
}
__global__
void gaussian_blur_row( int* inputChannel,
int* outputChannel,
int numRows, int numCols,
int* filter, const int filterWidth )
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
int absolute_image_position_x=thread_2D_pos.x;
int absolute_image_position_y=thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
int sum=0;
int i=-filterWidth/2;
int j=filterWidth/2;
if(thread_2D_pos.x < filterWidth/2)
i=-thread_2D_pos.x;
if((thread_2D_pos.x+filterWidth/2)>(numCols-1))
j=numCols-1-thread_2D_pos.x;
for(int c=i; c<=j;++c){
// for(int c=-filterWidth/2; c<=filterWidth/2;++c){
int xIdx=absolute_image_position_x+c;
int idx=(thread_2D_pos.y)*numCols+xIdx;
int filter_value=filter[c+filterWidth/2];
sum+=filter_value*(inputChannel[idx]);
//}
}
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
}
__global__
void gaussian_blur_col(int* inputChannel,
int* outputChannel,
int numRows, int numCols,
int* filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
int absolute_image_position_x=thread_2D_pos.x;
int absolute_image_position_y=thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
int sum=0;
int i=-filterWidth/2;
int j=filterWidth/2;
if(thread_2D_pos.y < filterWidth/2)
i=-thread_2D_pos.y;
if((thread_2D_pos.y+filterWidth/2)>(numRows-1))
j=numRows-1-thread_2D_pos.y;
for(int r=i; r<=j;++r){
int yIdx=absolute_image_position_y+r;
int idx=(yIdx)*numCols+thread_2D_pos.x;
int filter_value=filter[r+filterWidth/2];
sum+=filter_value*(inputChannel[idx]);
//if(idx%10==0)
}
// fflush();
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
}
int main(int argc, char **argv) {
int *d_inputImageRGBA;
int *h_outputImageRGBA, *d_outputImageRGBA ;
int *d_filter ;
const int numRows=5;
const int numCols=5;
const int filterWidth=3;
/*float img[numCols][numRows]={{1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0},
{9.0,8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0},
{1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0},
{9.0,8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0},
{1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0},
{9.0,8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0},
{1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0},
{9.0,8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0},
{1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0}};*/
/*int h_inputImageRGBA[numCols*numRows]={1,6, 3, 8, 13,
2,7,4,9,14,
3,8,5,10,15,
4,9,6,11,16,
5,10,7,12,17};
//float filter[filterWidth]={1/16,1/8,1/16};
int h_filter[filterWidth*filterWidth]={1,0,1,0,0,0,1,0,1};*/
int h_inputImageRGBA[numCols*numRows]={1,6, 3, 8, 13,
2,7,4,9,14,
3,8,5,10,15,
4,9,6,11,16,
5,10,7,12,17};
//float filter[filterWidth]={1/16,1/8,1/16};
int h_filter[filterWidth*filterWidth]={1,0,1,0,0,0,1,0,1};
//h_filter=filter;
//h_inputImageRGBA=(int*) malloc ( sizeof(int)* numRows * numCols);
//h_inputImageRGBA=filter;
const long numPixels = numRows * numCols;
const int s=1;
const int oCol=(numCols-filterWidth)/s+1;
const int oRow=(numRows-filterWidth)/s+1;
const int oNumPixels=oCol*oRow;
h_outputImageRGBA=(int*) malloc ( sizeof(int)* oNumPixels);
//int test[oNumPixels]={0,0,0,0};
//allocate memory on the device for both input and output
checkCudaErrors(hipMalloc((void**)&d_inputImageRGBA, sizeof(int) * numPixels));
checkCudaErrors(hipMalloc((void**)&d_outputImageRGBA, sizeof(int) *oNumPixels));
checkCudaErrors(hipMemset(d_outputImageRGBA, 0, oNumPixels * sizeof(int))); //make sure no memory is left laying around
//printf("sizeOf d_inputImageRGBA= %d ,d_outputImageRGBA=%d, %d\n",sizeof(d_inputImageRGBA),sizeof(d_outputImageRGBA),numPixels * sizeof(int));
//copy input array to the GPU
checkCudaErrors(hipMemcpy(d_inputImageRGBA, h_inputImageRGBA, sizeof(int) * numPixels, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**)&d_filter, sizeof(int) * filterWidth*filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter,h_filter, sizeof(int) * filterWidth*filterWidth,hipMemcpyHostToDevice));
GpuTimer timer;
timer.Start();
//call the students' code
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(1,1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize(oCol/blockSize.x +1,oRow/blockSize.y +1);
//TODO: Call your convolution kernel here 3 times, once for each color channel.
/* gaussian_blur_row<<<gridSize, blockSize>>>(d_inputImageRGBA,
d_outputImageRGBA,
numRows,
numCols,
d_filter,
filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
gaussian_blur_col<<<gridSize, blockSize>>>(d_outputImageRGBA,
d_inputImageRGBA,
numRows,
numCols,
d_filter,
filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());*/
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
d_outputImageRGBA,
numRows,
numCols,
d_filter,
filterWidth,s,oRow,oCol);
timer.Stop();
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
int err = printf("Your code ran in: %f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably the student closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
//check results and output the blurred image
//copy the output back to the host
//printf("sizeOf d_inputImageRGBA= %d ,d_outputImageRGBA=%d",sizeof(d_inputImageRGBA),sizeof(d_outputImageRGBA));
checkCudaErrors(hipMemcpy(h_outputImageRGBA, d_outputImageRGBA, sizeof(int) * oNumPixels, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_inputImageRGBA));
checkCudaErrors(hipFree(d_outputImageRGBA));
checkCudaErrors(hipFree(d_filter));
for(int i=0;i<oNumPixels;i++){
printf("\t%d",h_outputImageRGBA[i]);
if((i+1)%oCol==0) // newline after each output row
printf("\n");
}
return 0;
}
|
ba0ea00121abf5dc84d0f32f5e02f2419cb317ac.cu
|
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "timer.h"
#include "utils.h"
__global__ void gaussian_blur(int* inputChannel,
int* outputChannel,
int numRows, int numCols,
int* filter, const int filterWidth,const int s, int oRows, int oCols)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
int x=blockIdx.x * blockDim.x + threadIdx.x;
int y=blockIdx.y * blockDim.y + threadIdx.y;
const int thread_1D_pos = y * oCols + x;
if ( x >=oCols ||y >= oRows )
{
return;
}
int sum=0;
//printf("gloc=%d =>threadId.x=%d,threadId.y=%d,blockId.x=%d,blocId.y=%d,position.x=%d,position.y=%d\n",thread_1D_pos,threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,threadIdx.x +(blockIdx.x* blockDim.x),threadIdx.y +(blockIdx.y* blockDim.y));
int kidx=0;
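// (x, y) indexes the output; (x*s, y*s) is the top-left corner of the filterWidth x filterWidth window read from the input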
for(int r=0; r<filterWidth;++r){
for(int c=0; c<filterWidth;++c){
int idx=(y*s+r)*numCols+x*s+c;
int filter_value=filter[kidx++];
sum+=filter_value*inputChannel[idx];
}
}
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
__global__
void gaussian_blur_row( int* inputChannel,
int* outputChannel,
int numRows, int numCols,
int* filter, const int filterWidth )
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
int absolute_image_position_x=thread_2D_pos.x;
int absolute_image_position_y=thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
int sum=0;
int i=-filterWidth/2;
int j=filterWidth/2;
if(thread_2D_pos.x < filterWidth/2)
i=-thread_2D_pos.x;
if((thread_2D_pos.x+filterWidth/2)>(numCols-1))
j=numCols-1-thread_2D_pos.x;
for(int c=i; c<=j;++c){
// for(int c=-filterWidth/2; c<=filterWidth/2;++c){
int xIdx=absolute_image_position_x+c;
int idx=(thread_2D_pos.y)*numCols+xIdx;
int filter_value=filter[c+filterWidth/2];
sum+=filter_value*(inputChannel[idx]);
//}
}
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
__global__
void gaussian_blur_col(int* inputChannel,
int* outputChannel,
int numRows, int numCols,
int* filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
int absolute_image_position_x=thread_2D_pos.x;
int absolute_image_position_y=thread_2D_pos.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
int sum=0;
int i=-filterWidth/2;
int j=filterWidth/2;
if(thread_2D_pos.y < filterWidth/2)
i=-thread_2D_pos.y;
if((thread_2D_pos.y+filterWidth/2)>(numRows-1))
j=numRows-1-thread_2D_pos.y;
for(int r=i; r<=j;++r){
int yIdx=absolute_image_position_y+r;
int idx=(yIdx)*numCols+thread_2D_pos.x;
int filter_value=filter[r+filterWidth/2];
sum+=filter_value*(inputChannel[idx]);
//if(idx%10==0)
}
// fflush();
outputChannel[thread_1D_pos]=sum;
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
int main(int argc, char **argv) {
int *d_inputImageRGBA;
int *h_outputImageRGBA, *d_outputImageRGBA ;
int *d_filter ;
const int numRows=5;
const int numCols=5;
const int filterWidth=3;
/*float img[numCols][numRows]={{1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0},
{9.0,8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0},
{1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0},
{9.0,8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0},
{1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0},
{9.0,8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0},
{1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0},
{9.0,8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0},
{1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0}};*/
/*int h_inputImageRGBA[numCols*numRows]={1,6, 3, 8, 13,
2,7,4,9,14,
3,8,5,10,15,
4,9,6,11,16,
5,10,7,12,17};
//float filter[filterWidth]={1/16,1/8,1/16};
int h_filter[filterWidth*filterWidth]={1,0,1,0,0,0,1,0,1};*/
int h_inputImageRGBA[numCols*numRows]={1,6, 3, 8, 13,
2,7,4,9,14,
3,8,5,10,15,
4,9,6,11,16,
5,10,7,12,17};
//float filter[filterWidth]={1/16,1/8,1/16};
int h_filter[filterWidth*filterWidth]={1,0,1,0,0,0,1,0,1};
//h_filter=filter;
//h_inputImageRGBA=(int*) malloc ( sizeof(int)* numRows * numCols);
//h_inputImageRGBA=filter;
const long numPixels = numRows * numCols;
const int s=1;
const int oCol=(numCols-filterWidth)/s+1;
const int oRow=(numRows-filterWidth)/s+1;
const int oNumPixels=oCol*oRow;
h_outputImageRGBA=(int*) malloc ( sizeof(int)* oNumPixels);
//int test[oNumPixels]={0,0,0,0};
//allocate memory on the device for both input and output
checkCudaErrors(cudaMalloc((void**)&d_inputImageRGBA, sizeof(int) * numPixels));
checkCudaErrors(cudaMalloc((void**)&d_outputImageRGBA, sizeof(int) *oNumPixels));
checkCudaErrors(cudaMemset(d_outputImageRGBA, 0, oNumPixels * sizeof(int))); //make sure no memory is left laying around
//printf("sizeOf d_inputImageRGBA= %d ,d_outputImageRGBA=%d, %d\n",sizeof(d_inputImageRGBA),sizeof(d_outputImageRGBA),numPixels * sizeof(int));
//copy input array to the GPU
checkCudaErrors(cudaMemcpy(d_inputImageRGBA, h_inputImageRGBA, sizeof(int) * numPixels, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**)&d_filter, sizeof(int) * filterWidth*filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter,h_filter, sizeof(int) * filterWidth*filterWidth,cudaMemcpyHostToDevice));
GpuTimer timer;
timer.Start();
//call the students' code
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(1,1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize(oCol/blockSize.x +1,oRow/blockSize.y +1);
//TODO: Call your convolution kernel here 3 times, once for each color channel.
/* gaussian_blur_row<<<gridSize, blockSize>>>(d_inputImageRGBA,
d_outputImageRGBA,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur_col<<<gridSize, blockSize>>>(d_outputImageRGBA,
d_inputImageRGBA,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());*/
gaussian_blur<<<gridSize, blockSize>>>(d_inputImageRGBA,
d_outputImageRGBA,
numRows,
numCols,
d_filter,
filterWidth,s,oRow,oCol);
timer.Stop();
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
int err = printf("Your code ran in: %f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably the student closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
//check results and output the blurred image
//copy the output back to the host
//printf("sizeOf d_inputImageRGBA= %d ,d_outputImageRGBA=%d",sizeof(d_inputImageRGBA),sizeof(d_outputImageRGBA));
checkCudaErrors(cudaMemcpy(h_outputImageRGBA, d_outputImageRGBA, sizeof(int) * oNumPixels, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_inputImageRGBA));
checkCudaErrors(cudaFree(d_outputImageRGBA));
checkCudaErrors(cudaFree(d_filter));
for(int i=0;i<oNumPixels;i++){
printf("\t%d",h_outputImageRGBA[i]);
if((i+1)%oCol==0) // newline after each output row
printf("\n");
}
return 0;
}
|
91ae0591068d821e4530f147adbcdd2bf6e43394.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hiprand/hiprand.h"
#define _USE_MATH_DEFINES
#include <math.h>
#include <iostream>
using namespace std;
__device__ __host__ __inline__ float N(float x) {
return 0.5 + 0.5 * erf(x * M_SQRT1_2); // standard normal CDF: 0.5 * (1 + erf(x / sqrt(2)))
}
__device__ __host__ void price(float k, float s, float t, float r, float v, float* c, float* p) {
float srt = v * sqrt(t);
float d1 = (logf(s / k) + (r + 0.5 * v * v) * t) / srt;
float d2 = d1 - srt;
float kert =k * expf(-r * t);
*c = N(d1) * s - N(d2) * kert;
*p = kert - s + *c;
}
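// Hedged worked example (not in the original): the last line above is put-call
// parity, p = c + k*exp(-r*t) - s. With s = k = 100, t = 1, r = 0 and v = 0.2,
// d1 = 0.1 and d2 = -0.1, so (with N as the standard normal CDF) the call and
// the put are both worth about 7.97, since the term k*exp(-r*t) - s vanishes.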
__global__ void price(float* k, float* s, float* t, float* r, float* v, float* c, float* p) {
int idx = threadIdx.x;
price(k[idx], s[idx], t[idx], r[idx], v[idx], &c[idx], &p[idx]);
}
int main() {
float* args[5];
const int count = 512;
hiprandStatus_t rngStatus; // status of the hiprand calls below
hipError_t hipErr; // status of the hip runtime calls below
hiprandGenerator_t gen;
rngStatus = hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MTGP32);
for (int i = 0; i < 5; ++i) {
hipMalloc(&args[i], sizeof(float) * 1024);
rngStatus = hiprandGenerateUniform(gen, args[i], count);
}
float *dc, *dp;
hipErr = hipMalloc(&dc, count * sizeof(float));
hipErr = hipMalloc(&dp, count * sizeof(float));
hipLaunchKernelGGL(( price), dim3(1), dim3(count), 0, 0, args[0], args[1], args[2], args[3], args[4], dc, dp);
float hc[count] = { 0 };
float hp[count] = { 0 };
hipMemcpy(hc, dc, sizeof(float) * count, hipMemcpyDeviceToHost);
hipMemcpy(hp, dp, sizeof(float) * count, hipMemcpyDeviceToHost);
hipFree(dc);
hipFree(dp);
for (int i = 0; i < 5; ++i) {
hipFree(args[i]); // args[i] is itself the device pointer to free
}
hipDeviceReset();
cout << "HC:";
for (int i = 0; i < count; i++) {
cout << hc[i] << '\t';
}
cout << '\n' << "HP:";
for (int i = 0; i < count; i++) {
cout << hp[i] << '\t';
}
return 0;
}
|
91ae0591068d821e4530f147adbcdd2bf6e43394.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand.h"
#define _USE_MATH_DEFINES
#include <math.h>
#include <iostream>
using namespace std;
__device__ __host__ __inline__ float N(float x) {
return 0.5 + 0.5 * erf(x * M_SQRT1_2); // standard normal CDF: 0.5 * (1 + erf(x / sqrt(2)))
}
__device__ __host__ void price(float k, float s, float t, float r, float v, float* c, float* p) {
float srt = v * sqrt(t);
float d1 = (logf(s / k) + (r + 0.5 * v * v) * t) / srt;
float d2 = d1 - srt;
float kert =k * expf(-r * t);
*c = N(d1) * s - N(d2) * kert;
*p = kert - s + *c;
}
__global__ void price(float* k, float* s, float* t, float* r, float* v, float* c, float* p) {
int idx = threadIdx.x;
price(k[idx], s[idx], t[idx], r[idx], v[idx], &c[idx], &p[idx]);
}
int main() {
float* args[5];
const int count = 512;
curandStatus_t curandStatus;
cudaError_t cudaError;
curandGenerator_t gen;
curandStatus = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32);
for (int i = 0; i < 5; ++i) {
cudaMalloc(&args[i], sizeof(float) * 1024);
curandStatus = curandGenerateUniform(gen, args[i], count);
}
float *dc, *dp;
cudaError = cudaMalloc(&dc, count * sizeof(float));
cudaError = cudaMalloc(&dp, count * sizeof(float));
price<<<1, count>>>(args[0], args[1], args[2], args[3], args[4], dc, dp);
float hc[count] = { 0 };
float hp[count] = { 0 };
cudaMemcpy(hc, dc, sizeof(float) * count, cudaMemcpyDeviceToHost);
cudaMemcpy(hp, dp, sizeof(float) * count, cudaMemcpyDeviceToHost);
cudaFree(dc);
cudaFree(dp);
for (int i = 0; i < 5; ++i) {
cudaFree(args[i]); // args[i] is itself the device pointer to free
}
cudaDeviceReset();
cout << "HC:";
for (int i = 0; i < count; i++) {
cout << hc[i] << '\t';
}
cout << '\n' << "HP:";
for (int i = 0; i < count; i++) {
cout << hp[i] << '\t';
}
return 0;
}
|
5948fa91db1f98a5bf71ceaa1807cf1de63817a4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void findDiffLabels(float* devDiff, int diffPitchInFloats, int nPoints, int nClusters, int* devClusters, int* devChanges) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ unsigned int localChanges;
if (threadIdx.x == 0) localChanges = 0; // shared counters start uninitialized and must be zeroed
__syncthreads();
if (x < nPoints) {
int index = x;
float minDistance = 10000000;
int minCluster = -1;
for(int cluster = 0; cluster < nClusters; cluster++) {
float clusterDistance = devDiff[index];
if (clusterDistance < minDistance) {
minDistance = clusterDistance;
minCluster = cluster;
}
index += diffPitchInFloats;
}
int previousCluster = devClusters[x];
devClusters[x] = minCluster;
if (minCluster != previousCluster) {
atomicInc(&localChanges, 10000000);
}
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(devChanges, localChanges);
}
}
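// Hedged host-side sketch (not part of the original file) showing one way to
// drive the kernel above: devChanges must be zeroed before the launch because
// every block atomically adds its per-block count into it.
static int run_findDiffLabels(float* devDiff, int diffPitchInFloats, int nPoints,
int nClusters, int* devClusters, int* devChanges) {
hipMemset(devChanges, 0, sizeof(int)); // reset the global change counter
int block = 256;
int grid = (nPoints + block - 1) / block;
hipLaunchKernelGGL(findDiffLabels, dim3(grid), dim3(block), 0, 0,
devDiff, diffPitchInFloats, nPoints, nClusters, devClusters, devChanges);
int hostChanges = 0;
hipMemcpy(&hostChanges, devChanges, sizeof(int), hipMemcpyDeviceToHost);
return hostChanges; // number of points whose cluster label changed
}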
|
5948fa91db1f98a5bf71ceaa1807cf1de63817a4.cu
|
#include "includes.h"
__global__ void findDiffLabels(float* devDiff, int diffPitchInFloats, int nPoints, int nClusters, int* devClusters, int* devChanges) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ unsigned int localChanges;
if (threadIdx.x == 0) localChanges = 0; // shared counters start uninitialized and must be zeroed
__syncthreads();
if (x < nPoints) {
int index = x;
float minDistance = 10000000;
int minCluster = -1;
for(int cluster = 0; cluster < nClusters; cluster++) {
float clusterDistance = devDiff[index];
if (clusterDistance < minDistance) {
minDistance = clusterDistance;
minCluster = cluster;
}
index += diffPitchInFloats;
}
int previousCluster = devClusters[x];
devClusters[x] = minCluster;
if (minCluster != previousCluster) {
atomicInc(&localChanges, 10000000);
}
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(devChanges, localChanges);
}
}
|
fdbbe4546db487a1fd9e33565ae579c372cc47e3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <set>
#include <vector>
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
struct SelectedRowsAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const framework::SelectedRows& input2,
framework::SelectedRows* output) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height, input2.height(),
platform::errors::InvalidArgument("The two inputs height must be equal."
"But recieved first input height = "
"[%d], second input height = [%d]",
in1_height, input2.height()));
output->set_height(in1_height);
framework::Vector<int64_t> in1_rows(input1.rows());
auto& in2_rows = input2.rows();
std::vector<int64_t> out_rows;
out_rows.reserve(in1_rows.size() + in2_rows.size());
// concat rows
out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
output->set_rows(out_rows);
auto* out_value = output->mutable_value();
auto& in1_value = input1.value();
auto& in2_value = input2.value();
auto in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel, in2_value.numel() / in2_rows.size(),
platform::errors::InvalidArgument(
"The two inputs width must be equal."
"But recieved first input width = [%d], second input width = [%d]",
in1_row_numel, in2_value.numel() / in2_rows.size()));
PADDLE_ENFORCE_EQ(
in1_row_numel, out_value->numel() / out_rows.size(),
platform::errors::InvalidArgument(
"The input and oupput width must be equal."
"But recieved input width = [%d], output width = [%d]",
in1_row_numel, out_value->numel() / out_rows.size()));
auto* out_data = out_value->data<T>();
auto* in1_data = in1_value.data<T>();
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true,
platform::errors::InvalidArgument(
"The running enviroment is not on the GPU place."));
auto in2_place = input2.place();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(in2_place), true,
platform::errors::InvalidArgument(
"The running enviroment is not on the GPU place."));
auto out_place = context.GetPlace();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(out_place), true,
platform::errors::InvalidArgument(
"The running enviroment is not on the GPU place."));
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, out_place), out_data,
BOOST_GET_CONST(platform::CUDAPlace, in1_place), in1_data,
in1_value.numel() * sizeof(T), context.stream());
auto* in2_data = in2_value.data<T>();
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, out_place),
out_data + in1_value.numel(),
BOOST_GET_CONST(platform::CUDAPlace, in2_place), in2_data,
in2_value.numel() * sizeof(T), context.stream());
}
};
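// Hedged illustration (not in the upstream Paddle source): adding a SelectedRows
// with rows {0, 3} to one with rows {1, 3} produces rows {0, 3, 1, 3} and simply
// stacks the two value blocks back to back; duplicate row indices are kept here,
// and it is MergeAdd further down in this file that folds duplicates together.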
template struct SelectedRowsAdd<platform::CUDADeviceContext, float>;
template struct SelectedRowsAdd<platform::CUDADeviceContext, double>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
const int64_t* rows, T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we can not use
// tensor_out[index] += selected_rows[index]; Instead, we have to use
// AtomicAdd to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
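// Hedged illustration (not in the upstream Paddle source): with rows = {0, 2, 0}
// and row_numel = 2, blocks 0 and 2 both write into tensor_out row 0, so its two
// elements receive the sum of selected_rows rows 0 and 2; this duplicate-row
// scatter is why the loop above uses CudaAtomicAdd rather than a plain +=.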
} // namespace
template <typename T>
struct SelectedRowsAddTensor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const framework::Tensor& input2, framework::Tensor* output) {
auto in1_height = input1.height();
auto in2_dims = input2.dims();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(
in1_height, in2_dims[0],
platform::errors::InvalidArgument(
"The two inputs height must be equal."
"But recieved first input height = [%d], first input height = [%d]",
in1_height, in2_dims[0]));
PADDLE_ENFORCE_EQ(
in1_height, out_dims[0],
platform::errors::InvalidArgument(
"The input and output height must be equal."
"But recieved input height = [%d], output height = [%d]",
in1_height, out_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel, input2.numel() / in1_height,
platform::errors::InvalidArgument(
"The two inputs width must be equal."
"But recieved first input width = [%d], second input width = [%d]",
in1_row_numel, input2.numel() / in1_height));
PADDLE_ENFORCE_EQ(
in1_row_numel, output->numel() / in1_height,
platform::errors::InvalidArgument(
"The input and output width must be equal."
"But recieved input width = [%d], output width = [%d]",
in1_row_numel, output->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2.data<T>();
auto* out_data = output->data<T>();
SetConstant<platform::CUDADeviceContext, T> functor;
functor(context, output, static_cast<T>(0));
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
hipLaunchKernelGGL(( SelectedRowsAddTensorKernel<
T, block_size>), dim3(grid), dim3(threads), 0, context.stream(),
in1_data, in1_rows.CUDAData(context.GetPlace()), out_data,
in1_row_numel);
auto out_eigen = framework::EigenVector<T>::Flatten(*output);
auto in2_eigen = framework::EigenVector<T>::Flatten(input2);
out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
}
};
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, double>;
template struct SelectedRowsAdd<platform::CUDADeviceContext, platform::float16>;
template struct SelectedRowsAddTensor<platform::CUDADeviceContext,
platform::float16>;
template <typename T>
struct SelectedRowsAddTo<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const int64_t input2_offset,
framework::SelectedRows* input2) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height, input2->height(),
platform::errors::InvalidArgument("The two inputs height must be equal."
"But recieved first input height = "
"[%d], second input height = [%d]",
in1_height, input2->height()));
auto& in1_rows = input1.rows();
auto& in2_rows = *(input2->mutable_rows());
auto& in1_value = input1.value();
auto* in2_value = input2->mutable_value();
// concat rows
if (in1_rows.size()) {
in2_rows.Extend(in1_rows.begin(), in1_rows.end());
}
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true,
platform::errors::InvalidArgument(
"The running enviroment is not on the GPU place."));
auto in2_place = input2->place();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true,
platform::errors::InvalidArgument(
"The running enviroment is not on the GPU place."));
auto* in1_data = in1_value.data<T>();
auto* in2_data = in2_value->data<T>();
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, in2_place),
in2_data + input2_offset,
BOOST_GET_CONST(platform::CUDAPlace, in1_place), in1_data,
in1_value.numel() * sizeof(T), context.stream());
}
};
template struct SelectedRowsAddTo<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int64_t>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext,
platform::float16>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddToTensor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
framework::Tensor* input2) {
auto in1_height = input1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height, in2_dims[0],
platform::errors::InvalidArgument("The two inputs height must be equal."
"But recieved first input height = "
"[%d], second input height = [%d]",
in1_height, in2_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel, input2->numel() / in1_height,
platform::errors::InvalidArgument(
"The two inputs width must be equal."
"But recieved first input width = [%d], second input width = [%d]",
in1_row_numel, input2->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
hipLaunchKernelGGL(( SelectedRowsAddToTensorKernel<
T, block_size>), dim3(grid), dim3(threads), 0, context.stream(),
in1_data, in1_rows.CUDAData(context.GetPlace()), in2_data,
in1_row_numel);
}
};
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int64_t>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext,
platform::float16>;
namespace scatter {
template <typename T, int block_size>
__global__ void MergeAddKernel(const T* input, const int64_t* input_rows,
T* out, const int64_t* out_rows,
size_t out_rows_size, int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
__shared__ size_t out_idx;
if (tid == 0) {
for (size_t i = 0; i < out_rows_size; i++) {
if (input_rows[ty] == out_rows[i]) {
out_idx = i;
}
}
}
__syncthreads();
input += ty * row_numel;
out += out_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
paddle::platform::CudaAtomicAdd(out + index, input[index]);
}
}
template <typename T>
struct MergeAdd<platform::CUDADeviceContext, T> {
framework::SelectedRows operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input,
const bool sorted_result = false) {
framework::SelectedRows out;
(*this)(context, input, &out);
return out;
}
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input,
framework::SelectedRows* output,
const bool sorted_result = false) {
framework::Vector<int64_t> input_rows(input.rows());
if (input_rows.size() == 0) {
return;
}
framework::SelectedRows& out = *output;
std::set<int64_t> row_set(input_rows.begin(), input_rows.end());
std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end());
framework::Vector<int64_t> merge_rows(merge_rows_cpu);
auto input_width = input.value().dims()[1];
out.set_rows(merge_rows);
out.set_height(input.height());
out.mutable_value()->mutable_data<T>(
framework::make_ddim(
{static_cast<int64_t>(merge_rows.size()), input_width}),
context.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
auto* input_data = input.value().data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid1(input_rows.size(), 1);
hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(),
input_data, input_rows.CUDAData(context.GetPlace()), out_data,
out.mutable_rows()->CUDAMutableData(context.GetPlace()),
out.rows().size(), input_width);
}
void operator()(const platform::CUDADeviceContext& context,
const std::vector<const framework::SelectedRows*>& inputs,
framework::SelectedRows* output,
const bool sorted_result = false) {
if (inputs.size() == 0) {
VLOG(3) << "no input! return";
return;
}
const framework::SelectedRows* has_value_input = nullptr;
for (auto* in : inputs) {
if (in->rows().size() > 0) {
has_value_input = in;
break;
}
}
if (has_value_input == nullptr) {
VLOG(3) << "no input has value! just return" << std::endl;
return;
}
auto input_width = has_value_input->value().dims()[1];
auto input_height = has_value_input->height();
framework::SelectedRows& out = *output;
std::set<int64_t> merged_row_set;
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
PADDLE_ENFORCE_EQ(input_width, input->value().dims()[1],
platform::errors::InvalidArgument(
"All input should have same "
"dimension except for the first one."));
PADDLE_ENFORCE_EQ(input_height, input->height(),
platform::errors::InvalidArgument(
"All input should have same height."));
merged_row_set.insert(input->rows().begin(), input->rows().end());
}
std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(),
merged_row_set.end());
framework::Vector<int64_t> merge_rows(merge_rows_cpu);
out.set_rows(merge_rows);
out.set_height(input_height);
out.mutable_value()->mutable_data<T>(
framework::make_ddim(
{static_cast<int64_t>(merge_rows.size()), input_width}),
context.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
auto* input_data = input->value().data<T>();
auto& input_rows = input->rows();
dim3 grid1(input_rows.size(), 1);
hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(),
input_data, input_rows.CUDAData(context.GetPlace()), out_data,
out.mutable_rows()->CUDAMutableData(context.GetPlace()),
out.rows().size(), input_width);
}
}
};
template struct MergeAdd<platform::CUDADeviceContext, float>;
template struct MergeAdd<platform::CUDADeviceContext, double>;
template struct MergeAdd<platform::CUDADeviceContext, int>;
template struct MergeAdd<platform::CUDADeviceContext, int64_t>;
template struct MergeAdd<platform::CUDADeviceContext, platform::float16>;
template struct MergeAdd<platform::CUDADeviceContext, platform::complex<float>>;
template struct MergeAdd<platform::CUDADeviceContext,
platform::complex<double>>;
template <typename T, int block_size>
__global__ void UpdateToTensorKernel(const T* selected_rows,
const int64_t* rows, const ScatterOps& op,
T* tensor_out, int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
// FIXME(typhoonzero): use macro fix the below messy code.
switch (op) {
case ScatterOps::ASSIGN:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index];
}
break;
case ScatterOps::ADD:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] += selected_rows[index];
}
break;
case ScatterOps::SUB:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] -= selected_rows[index];
}
break;
case ScatterOps::SUBBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] - tensor_out[index];
}
break;
case ScatterOps::MUL:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] *= selected_rows[index];
}
break;
case ScatterOps::DIV:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] /= selected_rows[index];
}
break;
case ScatterOps::DIVBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] / tensor_out[index];
}
break;
}
}
template <typename T>
struct UpdateToTensor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const ScatterOps& op, const framework::SelectedRows& input1,
framework::Tensor* input2) {
// NOTE: Use SelectedRowsAddToTensor for better performance
// no additional MergeAdd called.
MergeAdd<platform::CUDADeviceContext, T> merge_func;
auto merged_in1 = merge_func(context, input1);
auto in1_height = merged_in1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height, in2_dims[0],
platform::errors::InvalidArgument("The two inputs height must be equal."
"But recieved first input height = "
"[%d], second input height = [%d]",
in1_height, in2_dims[0]));
auto& in1_value = merged_in1.value();
auto& in1_rows = merged_in1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel, input2->numel() / in1_height,
platform::errors::InvalidArgument(
"The two inputs width must be equal."
"But recieved first input width = [%d], second input width = [%d]",
in1_row_numel, input2->numel() / in1_height));
auto* in1_data = in1_value.template data<T>();
auto* in2_data = input2->data<T>();
dim3 threads(platform::PADDLE_CUDA_NUM_THREADS, 1);
dim3 grid(in1_rows.size(), 1);
hipLaunchKernelGGL(( UpdateToTensorKernel<T, platform::PADDLE_CUDA_NUM_THREADS>),
dim3(grid), dim3(threads), 0, context.stream(), in1_data, in1_rows.cuda_data(),
op, in2_data, in1_row_numel);
}
};
} // namespace scatter
} // namespace math
} // namespace operators
} // namespace paddle
|
fdbbe4546db487a1fd9e33565ae579c372cc47e3.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <set>
#include <vector>
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
struct SelectedRowsAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const framework::SelectedRows& input2,
framework::SelectedRows* output) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height, input2.height(),
platform::errors::InvalidArgument("The two inputs height must be equal."
"But recieved first input height = "
"[%d], second input height = [%d]",
in1_height, input2.height()));
output->set_height(in1_height);
framework::Vector<int64_t> in1_rows(input1.rows());
auto& in2_rows = input2.rows();
std::vector<int64_t> out_rows;
out_rows.reserve(in1_rows.size() + in2_rows.size());
// concat rows
out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
output->set_rows(out_rows);
auto* out_value = output->mutable_value();
auto& in1_value = input1.value();
auto& in2_value = input2.value();
auto in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel, in2_value.numel() / in2_rows.size(),
platform::errors::InvalidArgument(
"The two inputs width must be equal."
"But recieved first input width = [%d], second input width = [%d]",
in1_row_numel, in2_value.numel() / in2_rows.size()));
PADDLE_ENFORCE_EQ(
in1_row_numel, out_value->numel() / out_rows.size(),
platform::errors::InvalidArgument(
"The input and oupput width must be equal."
"But recieved input width = [%d], output width = [%d]",
in1_row_numel, out_value->numel() / out_rows.size()));
auto* out_data = out_value->data<T>();
auto* in1_data = in1_value.data<T>();
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true,
platform::errors::InvalidArgument(
"The running enviroment is not on the GPU place."));
auto in2_place = input2.place();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(in2_place), true,
platform::errors::InvalidArgument(
"The running enviroment is not on the GPU place."));
auto out_place = context.GetPlace();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(out_place), true,
platform::errors::InvalidArgument(
"The running enviroment is not on the GPU place."));
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, out_place), out_data,
BOOST_GET_CONST(platform::CUDAPlace, in1_place), in1_data,
in1_value.numel() * sizeof(T), context.stream());
auto* in2_data = in2_value.data<T>();
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, out_place),
out_data + in1_value.numel(),
BOOST_GET_CONST(platform::CUDAPlace, in2_place), in2_data,
in2_value.numel() * sizeof(T), context.stream());
}
};
template struct SelectedRowsAdd<platform::CUDADeviceContext, float>;
template struct SelectedRowsAdd<platform::CUDADeviceContext, double>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
const int64_t* rows, T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we can not use
// tensor_out[index] += selected_rows[index]; Instead, we have to use
// AtomicAdd to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddTensor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const framework::Tensor& input2, framework::Tensor* output) {
auto in1_height = input1.height();
auto in2_dims = input2.dims();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(
in1_height, in2_dims[0],
platform::errors::InvalidArgument(
"The two inputs height must be equal."
"But recieved first input height = [%d], first input height = [%d]",
in1_height, in2_dims[0]));
PADDLE_ENFORCE_EQ(
in1_height, out_dims[0],
platform::errors::InvalidArgument(
"The input and output height must be equal."
"But recieved input height = [%d], output height = [%d]",
in1_height, out_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel, input2.numel() / in1_height,
platform::errors::InvalidArgument(
"The two inputs width must be equal."
"But recieved first input width = [%d], second input width = [%d]",
in1_row_numel, input2.numel() / in1_height));
PADDLE_ENFORCE_EQ(
in1_row_numel, output->numel() / in1_height,
platform::errors::InvalidArgument(
"The input and output width must be equal."
"But recieved input width = [%d], output width = [%d]",
in1_row_numel, output->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2.data<T>();
auto* out_data = output->data<T>();
SetConstant<platform::CUDADeviceContext, T> functor;
functor(context, output, static_cast<T>(0));
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
SelectedRowsAddTensorKernel<
T, block_size><<<grid, threads, 0, context.stream()>>>(
in1_data, in1_rows.CUDAData(context.GetPlace()), out_data,
in1_row_numel);
auto out_eigen = framework::EigenVector<T>::Flatten(*output);
auto in2_eigen = framework::EigenVector<T>::Flatten(input2);
out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
}
};
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, double>;
template struct SelectedRowsAdd<platform::CUDADeviceContext, platform::float16>;
template struct SelectedRowsAddTensor<platform::CUDADeviceContext,
platform::float16>;
template <typename T>
struct SelectedRowsAddTo<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const int64_t input2_offset,
framework::SelectedRows* input2) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height, input2->height(),
platform::errors::InvalidArgument("The two inputs height must be equal."
"But recieved first input height = "
"[%d], second input height = [%d]",
in1_height, input2->height()));
auto& in1_rows = input1.rows();
auto& in2_rows = *(input2->mutable_rows());
auto& in1_value = input1.value();
auto* in2_value = input2->mutable_value();
// concat rows
if (in1_rows.size()) {
in2_rows.Extend(in1_rows.begin(), in1_rows.end());
}
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true,
platform::errors::InvalidArgument(
"The running enviroment is not on the GPU place."));
auto in2_place = input2->place();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true,
platform::errors::InvalidArgument(
"The running enviroment is not on the GPU place."));
auto* in1_data = in1_value.data<T>();
auto* in2_data = in2_value->data<T>();
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, in2_place),
in2_data + input2_offset,
BOOST_GET_CONST(platform::CUDAPlace, in1_place), in1_data,
in1_value.numel() * sizeof(T), context.stream());
}
};
template struct SelectedRowsAddTo<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int64_t>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext,
platform::float16>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddToTensor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
framework::Tensor* input2) {
auto in1_height = input1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height, in2_dims[0],
platform::errors::InvalidArgument("The two inputs height must be equal."
"But recieved first input height = "
"[%d], second input height = [%d]",
in1_height, in2_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel, input2->numel() / in1_height,
platform::errors::InvalidArgument(
"The two inputs width must be equal."
"But recieved first input width = [%d], second input width = [%d]",
in1_row_numel, input2->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
SelectedRowsAddToTensorKernel<
T, block_size><<<grid, threads, 0, context.stream()>>>(
in1_data, in1_rows.CUDAData(context.GetPlace()), in2_data,
in1_row_numel);
}
};
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int64_t>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext,
platform::float16>;
namespace scatter {
template <typename T, int block_size>
__global__ void MergeAddKernel(const T* input, const int64_t* input_rows,
T* out, const int64_t* out_rows,
size_t out_rows_size, int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
__shared__ size_t out_idx;
if (tid == 0) {
for (size_t i = 0; i < out_rows_size; i++) {
if (input_rows[ty] == out_rows[i]) {
out_idx = i;
}
}
}
__syncthreads();
input += ty * row_numel;
out += out_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
paddle::platform::CudaAtomicAdd(out + index, input[index]);
}
}
template <typename T>
struct MergeAdd<platform::CUDADeviceContext, T> {
framework::SelectedRows operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input,
const bool sorted_result = false) {
framework::SelectedRows out;
(*this)(context, input, &out);
return out;
}
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input,
framework::SelectedRows* output,
const bool sorted_result = false) {
framework::Vector<int64_t> input_rows(input.rows());
if (input_rows.size() == 0) {
return;
}
framework::SelectedRows& out = *output;
std::set<int64_t> row_set(input_rows.begin(), input_rows.end());
std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end());
framework::Vector<int64_t> merge_rows(merge_rows_cpu);
auto input_width = input.value().dims()[1];
out.set_rows(merge_rows);
out.set_height(input.height());
out.mutable_value()->mutable_data<T>(
framework::make_ddim(
{static_cast<int64_t>(merge_rows.size()), input_width}),
context.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
auto* input_data = input.value().data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid1(input_rows.size(), 1);
MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>(
input_data, input_rows.CUDAData(context.GetPlace()), out_data,
out.mutable_rows()->CUDAMutableData(context.GetPlace()),
out.rows().size(), input_width);
}
void operator()(const platform::CUDADeviceContext& context,
const std::vector<const framework::SelectedRows*>& inputs,
framework::SelectedRows* output,
const bool sorted_result = false) {
if (inputs.size() == 0) {
VLOG(3) << "no input! return";
return;
}
const framework::SelectedRows* has_value_input = nullptr;
for (auto* in : inputs) {
if (in->rows().size() > 0) {
has_value_input = in;
break;
}
}
if (has_value_input == nullptr) {
VLOG(3) << "no input has value! just return" << std::endl;
return;
}
auto input_width = has_value_input->value().dims()[1];
auto input_height = has_value_input->height();
framework::SelectedRows& out = *output;
std::set<int64_t> merged_row_set;
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
PADDLE_ENFORCE_EQ(input_width, input->value().dims()[1],
platform::errors::InvalidArgument(
"All input should have same "
"dimension except for the first one."));
PADDLE_ENFORCE_EQ(input_height, input->height(),
platform::errors::InvalidArgument(
"All input should have same height."));
merged_row_set.insert(input->rows().begin(), input->rows().end());
}
std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(),
merged_row_set.end());
framework::Vector<int64_t> merge_rows(merge_rows_cpu);
out.set_rows(merge_rows);
out.set_height(input_height);
out.mutable_value()->mutable_data<T>(
framework::make_ddim(
{static_cast<int64_t>(merge_rows.size()), input_width}),
context.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
auto* input_data = input->value().data<T>();
auto& input_rows = input->rows();
dim3 grid1(input_rows.size(), 1);
MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>(
input_data, input_rows.CUDAData(context.GetPlace()), out_data,
out.mutable_rows()->CUDAMutableData(context.GetPlace()),
out.rows().size(), input_width);
}
}
};
template struct MergeAdd<platform::CUDADeviceContext, float>;
template struct MergeAdd<platform::CUDADeviceContext, double>;
template struct MergeAdd<platform::CUDADeviceContext, int>;
template struct MergeAdd<platform::CUDADeviceContext, int64_t>;
template struct MergeAdd<platform::CUDADeviceContext, platform::float16>;
template struct MergeAdd<platform::CUDADeviceContext, platform::complex<float>>;
template struct MergeAdd<platform::CUDADeviceContext,
platform::complex<double>>;
template <typename T, int block_size>
__global__ void UpdateToTensorKernel(const T* selected_rows,
const int64_t* rows, const ScatterOps& op,
T* tensor_out, int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
// FIXME(typhoonzero): use macro fix the below messy code.
switch (op) {
case ScatterOps::ASSIGN:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index];
}
break;
case ScatterOps::ADD:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] += selected_rows[index];
}
break;
case ScatterOps::SUB:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] -= selected_rows[index];
}
break;
case ScatterOps::SUBBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] - tensor_out[index];
}
break;
case ScatterOps::MUL:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] *= selected_rows[index];
}
break;
case ScatterOps::DIV:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] /= selected_rows[index];
}
break;
case ScatterOps::DIVBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] / tensor_out[index];
}
break;
}
}
template <typename T>
struct UpdateToTensor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const ScatterOps& op, const framework::SelectedRows& input1,
framework::Tensor* input2) {
// NOTE: Use SelectedRowsAddToTensor for better performance
// no additional MergeAdd called.
MergeAdd<platform::CUDADeviceContext, T> merge_func;
auto merged_in1 = merge_func(context, input1);
auto in1_height = merged_in1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height, in2_dims[0],
platform::errors::InvalidArgument("The two inputs height must be equal."
"But recieved first input height = "
"[%d], second input height = [%d]",
in1_height, in2_dims[0]));
auto& in1_value = merged_in1.value();
auto& in1_rows = merged_in1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel, input2->numel() / in1_height,
platform::errors::InvalidArgument(
"The two inputs width must be equal."
"But recieved first input width = [%d], second input width = [%d]",
in1_row_numel, input2->numel() / in1_height));
auto* in1_data = in1_value.template data<T>();
auto* in2_data = input2->data<T>();
dim3 threads(platform::PADDLE_CUDA_NUM_THREADS, 1);
dim3 grid(in1_rows.size(), 1);
UpdateToTensorKernel<T, platform::PADDLE_CUDA_NUM_THREADS><<<
grid, threads, 0, context.stream()>>>(in1_data, in1_rows.cuda_data(),
op, in2_data, in1_row_numel);
}
};
} // namespace scatter
} // namespace math
} // namespace operators
} // namespace paddle
|
4f21b4d645fd1277931cc2351e01e7472913e84d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "../include/activations.cuh"
/* activation kernels */
__device__ float sigmoid_activation_kernel(float x){
float y = 1./(1.+expf(-x));
// printf("%.2f ",y);
return y;
}
__device__ float tanh_activation_kernel(float x) {
return (2./(1+exp(-2*x))-1);
}
__device__ float relu_activation_kernel(float x){
return x>0? x:0;
}
__device__ float elu_activation_kernel(float x){
if (x>=0) {
return x;
} else {
return (0.01*exp(-x)+1);
}
}
__device__ float linear_activation_kernel(float x){return x;}
__device__ float logistic_activation_kernel(float x){return 1./(1.+exp(-x));}
/* gradient kernels */
__device__ float sigmoid_gradient_kernel(float x){
return (-x*exp(-x));
}
__device__ float tanh_gradient_kernel(float x){
if (x>-1 && x< 1) {
return 1;
} else{
return 0;
}
}
__device__ float relu_gradient_kernel(float x) {
if (x>0) {
return 1;
} else {
return 0;
}
}
__device__ float elu_gradient_kernel(float x) {
if (x>=0) {
return 1;
} else {
return (-0.01*exp(-x));
}
}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float select_activation(float x, Activation a) {
switch (a) {
case SIGMOID:
return sigmoid_activation_kernel(x);
break;
case TANH:
return tanh_activation_kernel(x);
break;
case RELU:
return relu_activation_kernel(x);
break;
case ELU:
return elu_activation_kernel(x);
break;
case LINEAR:
return linear_activation_kernel(x);
break;
case LOGISTIC:
return logistic_activation_kernel(x);
break;
default:
return relu_activation_kernel(x);
break;
}
return 0;
}
__global__ void launch_activations_on_gpu(float *x,int numElems,Activation a, float *y){
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < numElems) {
y[i] = select_activation(x[i],a);
// printf("i:%d, %.2f \n",i,y[idx]);
// id += gridDim.x*blockIdx.x;
}
}
void activations_on_gpu(float *x, int numelems, Activation a, float *y_data){
hipError_t err;
int k = (numelems-1)/BLOCK_L+1;
int y = 1;
int x_dir = k;
dim3 grid(x_dir,y,1);
// printf("Launcing the requested kernels...\n");
hipLaunchKernelGGL(( launch_activations_on_gpu), dim3(grid), dim3(BLOCK_L), 0, 0, x,numelems,a,y_data);
err = hipGetLastError();
// printf("err: %d", err);
if (err != hipSuccess) {
printf("Can't launch the activation kernels.\n");
}
}
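// Hedged usage sketch (not part of the original file); it assumes BLOCK_L and the
// Activation enum come from ../include/activations.cuh, as used above, and that
// n, h_x and h_y are a host-side element count and host buffers of n floats.
//
// float *d_x, *d_y;
// hipMalloc(&d_x, n * sizeof(float));
// hipMalloc(&d_y, n * sizeof(float));
// hipMemcpy(d_x, h_x, n * sizeof(float), hipMemcpyHostToDevice);
// activations_on_gpu(d_x, n, RELU, d_y); // applies ReLU elementwise into d_y
// hipMemcpy(h_y, d_y, n * sizeof(float), hipMemcpyDeviceToHost);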
|
4f21b4d645fd1277931cc2351e01e7472913e84d.cu
|
#include <stdio.h>
#include <cuda.h>
#include "../include/activations.cuh"
/* activation kernels */
__device__ float sigmoid_activation_kernel(float x){
float y = 1./(1.+expf(-x));
// printf("%.2f ",y);
return y;
}
__device__ float tanh_activation_kernel(float x) {
return (2./(1+exp(-2*x))-1);
}
__device__ float relu_activation_kernel(float x){
return x>0? x:0;
}
__device__ float elu_activation_kernel(float x){
if (x>=0) {
return x;
} else {
return (0.01*exp(-x)+1);
}
}
__device__ float linear_activation_kernel(float x){return x;}
__device__ float logistic_activation_kernel(float x){return 1./(1.+exp(-x));}
/* gradient kernels */
__device__ float sigmoid_gradient_kernel(float x){
return (-x*exp(-x));
}
__device__ float tanh_gradient_kernel(float x){
if (x>-1 && x< 1) {
return 1;
} else{
return 0;
}
}
__device__ float relu_gradient_kernel(float x) {
if (x>0) {
return 1;
} else {
return 0;
}
}
__device__ float elu_gradient_kernel(float x) {
if (x>=0) {
return 1;
} else {
return (-0.01*exp(-x));
}
}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float select_activation(float x, Activation a) {
switch (a) {
case SIGMOID:
return sigmoid_activation_kernel(x);
break;
case TANH:
return tanh_activation_kernel(x);
break;
case RELU:
return relu_activation_kernel(x);
break;
case ELU:
return elu_activation_kernel(x);
break;
case LINEAR:
return linear_activation_kernel(x);
break;
case LOGISTIC:
return logistic_activation_kernel(x);
break;
default:
return relu_activation_kernel(x);
break;
}
return 0;
}
__global__ void launch_activations_on_gpu(float *x,int numElems,Activation a, float *y){
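// derive a global 1-D element index from a flattened (x,y) grid of 1-D blocks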
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < numElems) {
y[i] = select_activation(x[i],a);
// printf("i:%d, %.2f \n",i,y[idx]);
// id += gridDim.x*blockIdx.x;
}
}
void activations_on_gpu(float *x, int numelems, Activation a, float *y_data){
cudaError_t err;
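// grid size = ceil(numelems / BLOCK_L), so every element is covered by exactly one thread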
int k = (numelems-1)/BLOCK_L+1;
int y = 1;
int x_dir = k;
dim3 grid(x_dir,y,1);
// printf("Launcing the requested kernels...\n");
launch_activations_on_gpu<<< grid, BLOCK_L>>> (x,numelems,a,y_data);
err = cudaGetLastError();
// printf("err: %d", err);
if (err != cudaSuccess) {
printf("Can't launch the activation kernels.\n");
}
}
|
49a68efaf1e5beaba51bdff7cf865804a0672563.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/detail/converters.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <strings/utilities.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <map>
#include <vector>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Units for timestamp conversion.
* These are defined since there are more than what cudf supports.
*/
enum class timestamp_units {
years, ///< precision is years
months, ///< precision is months
days, ///< precision is days
hours, ///< precision is hours
minutes, ///< precision is minutes
seconds, ///< precision is seconds
ms, ///< precision is milliseconds
us, ///< precision is microseconds
ns ///< precision is nanoseconds
};
// used to index values in a timeparts array
enum timestamp_parse_component {
TP_YEAR = 0,
TP_MONTH = 1,
TP_DAY = 2,
TP_DAY_OF_YEAR = 3,
TP_HOUR = 4,
TP_MINUTE = 5,
TP_SECOND = 6,
TP_SUBSECOND = 7,
TP_TZ_MINUTES = 8,
TP_ARRAYSIZE = 9
};
enum class format_char_type : int8_t {
literal, // literal char type passed through
specifier // timestamp format specifier
};
/**
* @brief Represents a format specifier or literal from a timestamp format string.
*
* Created by the format_compiler when parsing a format string.
*/
struct alignas(4) format_item {
format_char_type item_type; // specifier or literal indicator
char value; // specifier or literal value
int8_t length; // item length in bytes
static format_item new_specifier(char format_char, int8_t length)
{
return format_item{format_char_type::specifier, format_char, length};
}
static format_item new_delimiter(char literal)
{
return format_item{format_char_type::literal, literal, 1};
}
};
/**
* @brief The format_compiler parses a timestamp format string into a vector of
* format_items.
*
* The vector of format_items are used when parsing a string into timestamp
* components and when formatting a string from timestamp components.
*/
struct format_compiler {
std::string format;
std::string template_string;
timestamp_units units;
rmm::device_vector<format_item> d_items;
std::map<char, int8_t> specifier_lengths = {{'Y', 4},
{'y', 2},
{'m', 2},
{'d', 2},
{'H', 2},
{'I', 2},
{'M', 2},
{'S', 2},
{'f', 6},
{'z', 5},
{'Z', 3},
{'p', 2},
{'j', 3}};
format_compiler(const char* format, timestamp_units units) : format(format), units(units) {}
format_item const* compile_to_device()
{
std::vector<format_item> items;
const char* str = format.c_str();
auto length = format.length();
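// scan the format string: plain characters become literal items; '%' starts a specifier
// whose letter may be prefixed by a single precision digit (only supported for 'f')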
while (length > 0) {
char ch = *str++;
length--;
if (ch != '%') {
items.push_back(format_item::new_delimiter(ch));
template_string.append(1, ch);
continue;
}
CUDF_EXPECTS(length > 0, "Unfinished specifier in timestamp format");
ch = *str++;
length--;
if (ch == '%') // escaped % char
{
items.push_back(format_item::new_delimiter(ch));
template_string.append(1, ch);
continue;
}
if (ch >= '0' && ch <= '9') {
CUDF_EXPECTS(*str == 'f', "precision not supported for specifier: " + std::string(1, *str));
specifier_lengths[*str] = static_cast<int8_t>(ch - '0');
ch = *str++;
length--;
}
CUDF_EXPECTS(specifier_lengths.find(ch) != specifier_lengths.end(),
"invalid format specifier: " + std::string(1, ch));
int8_t spec_length = specifier_lengths[ch];
items.push_back(format_item::new_specifier(ch, spec_length));
template_string.append((size_t)spec_length, ch);
}
// create program in device memory
d_items.resize(items.size());
CUDA_TRY(hipMemcpyAsync(
d_items.data().get(), items.data(), items.size() * sizeof(items[0]), hipMemcpyHostToDevice));
return d_items.data().get();
}
// these calls are only valid after compile_to_device is called
size_type template_bytes() const { return static_cast<size_type>(template_string.size()); }
size_type items_count() const { return static_cast<size_type>(d_items.size()); }
int8_t subsecond_precision() const { return specifier_lengths.at('f'); }
};
// this parses date/time characters into a timestamp integer
template <typename T> // timestamp type
struct parse_datetime {
column_device_view const d_strings;
format_item const* d_format_items;
size_type items_count;
timestamp_units units;
int8_t subsecond_precision;
/**
* @brief Return power of ten value given an exponent.
*
* @return `1x10^exponent` for `0 <= exponent <= 9`
*/
__device__ constexpr int64_t power_of_ten(int32_t exponent)
{
constexpr int64_t powers_of_ten[] = {
1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L};
return powers_of_ten[exponent];
}
//
__device__ int32_t str2int(const char* str, size_type bytes)
{
const char* ptr = str;
int32_t value = 0;
for (size_type idx = 0; idx < bytes; ++idx) {
char chr = *ptr++;
if (chr < '0' || chr > '9') break;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return value;
}
// Walk the format_items to read the datetime string.
// Returns 0 if all ok.
__device__ int parse_into_parts(string_view const& d_string, int32_t* timeparts)
{
auto ptr = d_string.data();
auto length = d_string.size_bytes();
for (size_t idx = 0; idx < items_count; ++idx) {
auto item = d_format_items[idx];
if (item.value != 'f')
item.length = static_cast<int8_t>(::min(static_cast<size_type>(item.length), length));
if (item.item_type == format_char_type::literal) {
// static character we'll just skip;
// consume item.length bytes from string
ptr += item.length;
length -= item.length;
continue;
}
// special logic for each specifier
switch (item.value) {
case 'Y': timeparts[TP_YEAR] = str2int(ptr, item.length); break;
case 'y': timeparts[TP_YEAR] = str2int(ptr, item.length) + 1900; break;
case 'm': timeparts[TP_MONTH] = str2int(ptr, item.length); break;
case 'd': timeparts[TP_DAY] = str2int(ptr, item.length); break;
case 'j': timeparts[TP_DAY_OF_YEAR] = str2int(ptr, item.length); break;
case 'H':
case 'I': timeparts[TP_HOUR] = str2int(ptr, item.length); break;
case 'M': timeparts[TP_MINUTE] = str2int(ptr, item.length); break;
case 'S': timeparts[TP_SECOND] = str2int(ptr, item.length); break;
case 'f': {
int32_t const read_size = ::min(static_cast<int32_t>(item.length), length);
int64_t const fraction = str2int(ptr, read_size) * power_of_ten(item.length - read_size);
timeparts[TP_SUBSECOND] = static_cast<int32_t>(fraction);
break;
}
case 'p': {
string_view am_pm(ptr, 2);
auto hour = timeparts[TP_HOUR];
if ((am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0)) {
if (hour == 12) hour = 0;
} else if (hour < 12)
hour += 12;
timeparts[TP_HOUR] = hour;
break;
}
case 'z': {
int sign = *ptr == '-' ? 1 : -1; // revert timezone back to UTC
int hh = str2int(ptr + 1, 2);
int mm = str2int(ptr + 3, 2);
// ignoring the rest for now
// item.length has how many chars we should read
timeparts[TP_TZ_MINUTES] = sign * ((hh * 60) + mm);
break;
}
case 'Z': break; // skip
default: return 3;
}
ptr += item.length;
length -= item.length;
}
return 0;
}
__device__ int64_t timestamp_from_parts(int32_t const* timeparts, timestamp_units units)
{
auto year = timeparts[TP_YEAR];
if (units == timestamp_units::years) return year - 1970;
auto month = timeparts[TP_MONTH];
if (units == timestamp_units::months)
return ((year - 1970) * 12) + (month - 1); // months are 1-12, need to 0-base it here
auto day = timeparts[TP_DAY];
// The months are shifted so that March is the starting month and February
// (possible leap day in it) is the last month for the linear calculation
year -= (month <= 2) ? 1 : 0;
// date cycle repeats every 400 years (era)
constexpr int32_t erasInDays = 146097;
constexpr int32_t erasInYears = (erasInDays / 365);
auto era = (year >= 0 ? year : year - 399) / erasInYears;
auto yoe = year - era * erasInYears;
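// doy: day-of-year counted from March 1; doe: day-of-era within the 400-year cycle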
auto doy = month == 0 ? day : ((153 * (month + (month > 2 ? -3 : 9)) + 2) / 5 + day - 1);
auto doe = (yoe * 365) + (yoe / 4) - (yoe / 100) + doy;
int32_t days =
(era * erasInDays) + doe - 719468; // 719468 = days from 0000-00-00 to 1970-03-01
if (units == timestamp_units::days) return days;
auto tzadjust = timeparts[TP_TZ_MINUTES]; // in minutes
auto hour = timeparts[TP_HOUR];
if (units == timestamp_units::hours) return (days * 24L) + hour + (tzadjust / 60);
auto minute = timeparts[TP_MINUTE];
if (units == timestamp_units::minutes)
return static_cast<int64_t>(days * 24L * 60L) + (hour * 60L) + minute + tzadjust;
auto second = timeparts[TP_SECOND];
int64_t timestamp =
(days * 24L * 3600L) + (hour * 3600L) + (minute * 60L) + second + (tzadjust * 60);
if (units == timestamp_units::seconds) return timestamp;
int64_t subsecond =
timeparts[TP_SUBSECOND] * power_of_ten(9 - subsecond_precision); // normalize to nanoseconds
if (units == timestamp_units::ms) {
timestamp *= 1000L;
subsecond = subsecond / 1000000L;
} else if (units == timestamp_units::us) {
timestamp *= 1000000L;
subsecond = subsecond / 1000L;
} else if (units == timestamp_units::ns)
timestamp *= 1000000000L;
timestamp += subsecond;
return timestamp;
}
__device__ T operator()(size_type idx)
{
T epoch_time{typename T::duration{0}};
if (d_strings.is_null(idx)) return epoch_time;
string_view d_str = d_strings.element<string_view>(idx);
if (d_str.empty()) return epoch_time;
//
int32_t timeparts[TP_ARRAYSIZE] = {1970, 1, 1}; // month and day are 1-based
if (parse_into_parts(d_str, timeparts)) return epoch_time; // unexpected parse case
//
return T{T::duration(timestamp_from_parts(timeparts, units))};
}
};
// convert cudf type to timestamp units
struct dispatch_timestamp_to_units_fn {
template <typename T>
timestamp_units operator()()
{
CUDF_FAIL("Invalid type for timestamp conversion.");
}
};
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_D>()
{
return timestamp_units::days;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_s>()
{
return timestamp_units::seconds;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ms>()
{
return timestamp_units::ms;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_us>()
{
return timestamp_units::us;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ns>()
{
return timestamp_units::ns;
}
// dispatch operator to map timestamp to native fixed-width-type
struct dispatch_to_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const& d_strings,
std::string const& format,
timestamp_units units,
mutable_column_view& results_view,
hipStream_t stream) const
{
format_compiler compiler(format.c_str(), units);
auto d_items = compiler.compile_to_device();
auto d_results = results_view.data<T>();
parse_datetime<T> pfn{
d_strings, d_items, compiler.items_count(), units, compiler.subsecond_precision()};
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(results_view.size()),
d_results,
pfn);
}
template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const&,
std::string const&,
timestamp_units,
mutable_column_view&,
hipStream_t) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings,
data_type timestamp_type,
std::string const& format,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_timestamp_column(timestamp_type, 0);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
timestamp_units units = cudf::type_dispatcher(timestamp_type, dispatch_timestamp_to_units_fn());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
auto results = make_timestamp_column(timestamp_type,
strings_count,
copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto results_view = results->mutable_view();
cudf::type_dispatcher(
timestamp_type, dispatch_to_timestamps_fn(), d_column, format, units, results_view, stream);
results->set_null_count(strings.null_count());
return results;
}
} // namespace detail
// external API
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings,
data_type timestamp_type,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::to_timestamps(strings, timestamp_type, format, hipStream_t{}, mr);
}
namespace detail {
namespace {
// converts a timestamp into date-time string
template <typename T>
struct datetime_formatter {
const column_device_view d_timestamps;
const format_item* d_format_items;
size_type items_count;
timestamp_units units;
const int32_t* d_offsets;
char* d_chars;
__device__ cudf::timestamp_D::duration convert_to_days(int64_t timestamp, timestamp_units units)
{
using namespace simt::std::chrono;
using minutes = duration<timestamp_s::rep, minutes::period>;
using hours = duration<timestamp_s::rep, hours::period>;
switch (units) {
case timestamp_units::minutes: return floor<days>(minutes(timestamp));
case timestamp_units::seconds: return floor<days>(cudf::timestamp_s::duration(timestamp));
case timestamp_units::hours: return floor<days>(hours(timestamp));
case timestamp_units::ms: return floor<days>(cudf::timestamp_ms::duration(timestamp));
case timestamp_units::us: return floor<days>(cudf::timestamp_us::duration(timestamp));
case timestamp_units::ns: return floor<days>(cudf::timestamp_ns::duration(timestamp));
default: return cudf::timestamp_D::duration(timestamp);
}
}
// divide timestamp integer into time components (year, month, day, etc)
// TODO call the simt::std::chrono methods here instead when they are ready
__device__ void dissect_timestamp(int64_t timestamp, int32_t* timeparts)
{
if (units == timestamp_units::years) {
timeparts[TP_YEAR] = static_cast<int32_t>(timestamp) + 1970;
timeparts[TP_MONTH] = 1;
timeparts[TP_DAY] = 1;
return;
}
// Specialized modulo expression that handles negative values.
// Examples:
// modulo(1,60) 1
// modulo(-1,60) 59
auto modulo_time = [](int64_t time, int64_t base) {
return static_cast<int32_t>(((time % base) + base) % base);
};
// This function handles converting units by dividing and adjusting for negative values.
// Examples:
// scale(-61,60) -2
// scale(-60,60) -1
// scale(-59,60) -1
// scale( 59,60) 0
// scale( 60,60) 1
// scale( 61,60) 1
auto scale_time = [](int64_t time, int64_t base) {
return static_cast<int32_t>((time - ((time < 0) * (base - 1L))) / base);
};
if (units == timestamp_units::months) {
int32_t month = modulo_time(timestamp, 12);
int32_t year = scale_time(timestamp, 12) + 1970;
timeparts[TP_YEAR] = year;
timeparts[TP_MONTH] = month + 1; // months start at 1 and not 0
timeparts[TP_DAY] = 1;
return;
}
// first, convert to days so we can handle months, years, day of the year.
auto const days = convert_to_days(timestamp, units);
auto const ymd = simt::std::chrono::year_month_day(simt::std::chrono::sys_days(days));
auto const year = static_cast<int32_t>(ymd.year());
auto const month = static_cast<unsigned>(ymd.month());
auto const day = static_cast<unsigned>(ymd.day());
int32_t const monthDayOffset[] = {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334};
timeparts[TP_DAY_OF_YEAR] =
day + monthDayOffset[month - 1] + (month > 2 and ymd.year().is_leap());
timeparts[TP_YEAR] = year;
timeparts[TP_MONTH] = month;
timeparts[TP_DAY] = day;
if (units == timestamp_units::days) return;
// done with date, now work on time
if (units == timestamp_units::hours) {
timeparts[TP_HOUR] = modulo_time(timestamp, 24);
return;
}
if (units == timestamp_units::minutes) {
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 60), 24);
timeparts[TP_MINUTE] = modulo_time(timestamp, 60);
return;
}
if (units == timestamp_units::seconds) {
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 3600), 24);
timeparts[TP_MINUTE] = modulo_time(scale_time(timestamp, 60), 60);
timeparts[TP_SECOND] = modulo_time(timestamp, 60);
return;
}
// common utility for setting time components from a subsecond unit value
auto subsecond_fn = [&](int64_t subsecond_base) {
timeparts[TP_SUBSECOND] = modulo_time(timestamp, subsecond_base);
timestamp = timestamp / subsecond_base;
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 3600), 24);
timeparts[TP_MINUTE] = modulo_time(scale_time(timestamp, 60), 60);
timeparts[TP_SECOND] = modulo_time(timestamp, 60);
};
if (units == timestamp_units::ms)
subsecond_fn(1000);
else if (units == timestamp_units::us)
subsecond_fn(1000000);
else
subsecond_fn(1000000000);
}
// utility to create 0-padded integers (up to 9 chars)
__device__ char* int2str(char* str, int bytes, int val)
{
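// write val's digits least-significant-first into a zero-filled scratch buffer,
// then copy the low-order `bytes` digits back out in normal order (zero padded)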
char tmpl[9] = {'0', '0', '0', '0', '0', '0', '0', '0', '0'};
char* ptr = tmpl;
while (val > 0) {
int digit = val % 10;
*ptr++ = '0' + digit;
val = val / 10;
}
ptr = tmpl + bytes - 1;
while (bytes-- > 0) *str++ = *ptr--;
return str;
}
__device__ char* format_from_parts(int32_t const* timeparts, char* ptr)
{
for (size_t idx = 0; idx < items_count; ++idx) {
auto item = d_format_items[idx];
if (item.item_type == format_char_type::literal) {
*ptr++ = item.value;
continue;
}
// special logic for each specifier
switch (item.value) {
case 'Y': // 4-digit year
ptr = int2str(ptr, item.length, timeparts[TP_YEAR]);
break;
case 'y': // 2-digit year
{
auto year = timeparts[TP_YEAR];
// remove hundredths digits and above
ptr = int2str(ptr, item.length, year - ((year / 100) * 100));
break;
}
case 'm': // month
ptr = int2str(ptr, item.length, timeparts[TP_MONTH]);
break;
case 'd': // day of month
ptr = int2str(ptr, item.length, timeparts[TP_DAY]);
break;
case 'j': // day of year
ptr = int2str(ptr, item.length, timeparts[TP_DAY_OF_YEAR]);
break;
case 'H': // 24-hour
ptr = int2str(ptr, item.length, timeparts[TP_HOUR]);
break;
case 'I': // 12-hour
{
// 0 = 12am; 12 = 12pm; 6 = 06am; 18 = 06pm
auto hour = timeparts[TP_HOUR];
if (hour == 0) hour = 12;
if (hour > 12) hour -= 12;
ptr = int2str(ptr, item.length, hour);
break;
}
case 'M': // minute
ptr = int2str(ptr, item.length, timeparts[TP_MINUTE]);
break;
case 'S': // second
ptr = int2str(ptr, item.length, timeparts[TP_SECOND]);
break;
case 'f': // sub-second
{
char subsecond_digits[] = "000000000"; // 9 max digits
const int digits = [units = units] {
if (units == timestamp_units::ms) return 3;
if (units == timestamp_units::us) return 6;
if (units == timestamp_units::ns) return 9;
return 0;
}();
int2str(subsecond_digits, digits, timeparts[TP_SUBSECOND]);
ptr = copy_and_increment(ptr, subsecond_digits, item.length);
break;
}
case 'p': // am or pm
// 0 = 12am, 12 = 12pm
if (timeparts[TP_HOUR] < 12)
memcpy(ptr, "AM", 2);
else
memcpy(ptr, "PM", 2);
ptr += 2;
break;
case 'z': // timezone
memcpy(ptr, "+0000", 5); // always UTC
ptr += 5;
break;
case 'Z':
memcpy(ptr, "UTC", 3);
ptr += 3;
break;
default: // ignore everything else
break;
}
}
return ptr;
}
__device__ void operator()(size_type idx)
{
if (d_timestamps.is_null(idx)) return;
auto timestamp = d_timestamps.element<T>(idx);
int32_t timeparts[TP_ARRAYSIZE] = {0};
dissect_timestamp(timestamp.time_since_epoch().count(), timeparts);
// convert to characters
format_from_parts(timeparts, d_chars + d_offsets[idx]);
}
};
//
struct dispatch_from_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const& d_timestamps,
format_item const* d_format_items,
size_type items_count,
timestamp_units units,
const int32_t* d_offsets,
char* d_chars,
hipStream_t stream) const
{
datetime_formatter<T> pfn{d_timestamps, d_format_items, items_count, units, d_offsets, d_chars};
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
d_timestamps.size(),
pfn);
}
template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const&,
format_item const*,
size_type,
timestamp_units,
const int32_t*,
char* d_chars,
hipStream_t stream) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = timestamps.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
timestamp_units units =
cudf::type_dispatcher(timestamps.type(), dispatch_timestamp_to_units_fn());
format_compiler compiler(format.c_str(), units);
auto d_format_items = compiler.compile_to_device();
auto column = column_device_view::create(timestamps, stream);
auto d_column = *column;
// copy null mask
rmm::device_buffer null_mask = copy_bitmask(timestamps, stream, mr);
// Each string will be the same number of bytes which can be determined
// directly from the format string.
auto d_str_bytes = compiler.template_bytes(); // size in bytes of each string
// build offsets column
auto offsets_transformer_itr =
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
[d_column, d_str_bytes] __device__(size_type idx) {
return (d_column.is_null(idx) ? 0 : d_str_bytes);
});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto offsets_view = offsets_column->view();
auto d_new_offsets = offsets_view.template data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_new_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, timestamps.null_count(), bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.template data<char>();
// fill in chars column with timestamps
// dispatcher is called to handle the different timestamp types
cudf::type_dispatcher(timestamps.type(),
dispatch_from_timestamps_fn(),
d_column,
d_format_items,
compiler.items_count(),
units,
d_new_offsets,
d_chars,
stream);
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
timestamps.null_count(),
std::move(null_mask),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::from_timestamps(timestamps, format, hipStream_t{}, mr);
}
} // namespace strings
} // namespace cudf
|
49a68efaf1e5beaba51bdff7cf865804a0672563.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/detail/converters.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <strings/utilities.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <map>
#include <vector>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Units for timestamp conversion.
* These are defined since there are more than what cudf supports.
*/
enum class timestamp_units {
years, ///< precision is years
months, ///< precision is months
days, ///< precision is days
hours, ///< precision is hours
minutes, ///< precision is minutes
seconds, ///< precision is seconds
ms, ///< precision is milliseconds
us, ///< precision is microseconds
ns ///< precision is nanoseconds
};
// used to index values in a timeparts array
enum timestamp_parse_component {
TP_YEAR = 0,
TP_MONTH = 1,
TP_DAY = 2,
TP_DAY_OF_YEAR = 3,
TP_HOUR = 4,
TP_MINUTE = 5,
TP_SECOND = 6,
TP_SUBSECOND = 7,
TP_TZ_MINUTES = 8,
TP_ARRAYSIZE = 9
};
enum class format_char_type : int8_t {
literal, // literal char type passed through
specifier // timestamp format specifier
};
/**
* @brief Represents a format specifier or literal from a timestamp format string.
*
* Created by the format_compiler when parsing a format string.
*/
struct alignas(4) format_item {
format_char_type item_type; // specifier or literal indicator
char value; // specifier or literal value
int8_t length; // item length in bytes
static format_item new_specifier(char format_char, int8_t length)
{
return format_item{format_char_type::specifier, format_char, length};
}
static format_item new_delimiter(char literal)
{
return format_item{format_char_type::literal, literal, 1};
}
};
/**
* @brief The format_compiler parses a timestamp format string into a vector of
* format_items.
*
* The vector of format_items are used when parsing a string into timestamp
* components and when formatting a string from timestamp components.
*/
struct format_compiler {
std::string format;
std::string template_string;
timestamp_units units;
rmm::device_vector<format_item> d_items;
std::map<char, int8_t> specifier_lengths = {{'Y', 4},
{'y', 2},
{'m', 2},
{'d', 2},
{'H', 2},
{'I', 2},
{'M', 2},
{'S', 2},
{'f', 6},
{'z', 5},
{'Z', 3},
{'p', 2},
{'j', 3}};
format_compiler(const char* format, timestamp_units units) : format(format), units(units) {}
format_item const* compile_to_device()
{
std::vector<format_item> items;
const char* str = format.c_str();
auto length = format.length();
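// scan the format string: plain characters become literal items; '%' starts a specifier
// whose letter may be prefixed by a single precision digit (only supported for 'f')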
while (length > 0) {
char ch = *str++;
length--;
if (ch != '%') {
items.push_back(format_item::new_delimiter(ch));
template_string.append(1, ch);
continue;
}
CUDF_EXPECTS(length > 0, "Unfinished specifier in timestamp format");
ch = *str++;
length--;
if (ch == '%') // escaped % char
{
items.push_back(format_item::new_delimiter(ch));
template_string.append(1, ch);
continue;
}
if (ch >= '0' && ch <= '9') {
CUDF_EXPECTS(*str == 'f', "precision not supported for specifier: " + std::string(1, *str));
specifier_lengths[*str] = static_cast<int8_t>(ch - '0');
ch = *str++;
length--;
}
CUDF_EXPECTS(specifier_lengths.find(ch) != specifier_lengths.end(),
"invalid format specifier: " + std::string(1, ch));
int8_t spec_length = specifier_lengths[ch];
items.push_back(format_item::new_specifier(ch, spec_length));
template_string.append((size_t)spec_length, ch);
}
// create program in device memory
d_items.resize(items.size());
CUDA_TRY(cudaMemcpyAsync(
d_items.data().get(), items.data(), items.size() * sizeof(items[0]), cudaMemcpyHostToDevice));
return d_items.data().get();
}
// these calls are only valid after compile_to_device is called
size_type template_bytes() const { return static_cast<size_type>(template_string.size()); }
size_type items_count() const { return static_cast<size_type>(d_items.size()); }
int8_t subsecond_precision() const { return specifier_lengths.at('f'); }
};
// this parses date/time characters into a timestamp integer
template <typename T> // timestamp type
struct parse_datetime {
column_device_view const d_strings;
format_item const* d_format_items;
size_type items_count;
timestamp_units units;
int8_t subsecond_precision;
/**
* @brief Return power of ten value given an exponent.
*
* @return `1x10^exponent` for `0 <= exponent <= 9`
*/
__device__ constexpr int64_t power_of_ten(int32_t exponent)
{
constexpr int64_t powers_of_ten[] = {
1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L};
return powers_of_ten[exponent];
}
//
__device__ int32_t str2int(const char* str, size_type bytes)
{
const char* ptr = str;
int32_t value = 0;
for (size_type idx = 0; idx < bytes; ++idx) {
char chr = *ptr++;
if (chr < '0' || chr > '9') break;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return value;
}
// Walk the format_items to read the datetime string.
// Returns 0 if all ok.
__device__ int parse_into_parts(string_view const& d_string, int32_t* timeparts)
{
auto ptr = d_string.data();
auto length = d_string.size_bytes();
for (size_t idx = 0; idx < items_count; ++idx) {
auto item = d_format_items[idx];
if (item.value != 'f')
item.length = static_cast<int8_t>(std::min(static_cast<size_type>(item.length), length));
if (item.item_type == format_char_type::literal) {
// static character we'll just skip;
// consume item.length bytes from string
ptr += item.length;
length -= item.length;
continue;
}
// special logic for each specifier
switch (item.value) {
case 'Y': timeparts[TP_YEAR] = str2int(ptr, item.length); break;
case 'y': timeparts[TP_YEAR] = str2int(ptr, item.length) + 1900; break;
case 'm': timeparts[TP_MONTH] = str2int(ptr, item.length); break;
case 'd': timeparts[TP_DAY] = str2int(ptr, item.length); break;
case 'j': timeparts[TP_DAY_OF_YEAR] = str2int(ptr, item.length); break;
case 'H':
case 'I': timeparts[TP_HOUR] = str2int(ptr, item.length); break;
case 'M': timeparts[TP_MINUTE] = str2int(ptr, item.length); break;
case 'S': timeparts[TP_SECOND] = str2int(ptr, item.length); break;
case 'f': {
int32_t const read_size = std::min(static_cast<int32_t>(item.length), length);
int64_t const fraction = str2int(ptr, read_size) * power_of_ten(item.length - read_size);
timeparts[TP_SUBSECOND] = static_cast<int32_t>(fraction);
break;
}
case 'p': {
string_view am_pm(ptr, 2);
auto hour = timeparts[TP_HOUR];
if ((am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0)) {
if (hour == 12) hour = 0;
} else if (hour < 12)
hour += 12;
timeparts[TP_HOUR] = hour;
break;
}
case 'z': {
int sign = *ptr == '-' ? 1 : -1; // revert timezone back to UTC
int hh = str2int(ptr + 1, 2);
int mm = str2int(ptr + 3, 2);
// ignoring the rest for now
// item.length has how many chars we should read
timeparts[TP_TZ_MINUTES] = sign * ((hh * 60) + mm);
break;
}
case 'Z': break; // skip
default: return 3;
}
ptr += item.length;
length -= item.length;
}
return 0;
}
__device__ int64_t timestamp_from_parts(int32_t const* timeparts, timestamp_units units)
{
auto year = timeparts[TP_YEAR];
if (units == timestamp_units::years) return year - 1970;
auto month = timeparts[TP_MONTH];
if (units == timestamp_units::months)
return ((year - 1970) * 12) + (month - 1); // months are 1-12, need to 0-base it here
auto day = timeparts[TP_DAY];
// The months are shifted so that March is the starting month and February
// (possible leap day in it) is the last month for the linear calculation
year -= (month <= 2) ? 1 : 0;
// date cycle repeats every 400 years (era)
constexpr int32_t erasInDays = 146097;
constexpr int32_t erasInYears = (erasInDays / 365);
auto era = (year >= 0 ? year : year - 399) / erasInYears;
auto yoe = year - era * erasInYears;
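// doy: day-of-year counted from March 1; doe: day-of-era within the 400-year cycle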
auto doy = month == 0 ? day : ((153 * (month + (month > 2 ? -3 : 9)) + 2) / 5 + day - 1);
auto doe = (yoe * 365) + (yoe / 4) - (yoe / 100) + doy;
int32_t days =
(era * erasInDays) + doe - 719468; // 719468 = days from 0000-00-00 to 1970-03-01
if (units == timestamp_units::days) return days;
auto tzadjust = timeparts[TP_TZ_MINUTES]; // in minutes
auto hour = timeparts[TP_HOUR];
if (units == timestamp_units::hours) return (days * 24L) + hour + (tzadjust / 60);
auto minute = timeparts[TP_MINUTE];
if (units == timestamp_units::minutes)
return static_cast<int64_t>(days * 24L * 60L) + (hour * 60L) + minute + tzadjust;
auto second = timeparts[TP_SECOND];
int64_t timestamp =
(days * 24L * 3600L) + (hour * 3600L) + (minute * 60L) + second + (tzadjust * 60);
if (units == timestamp_units::seconds) return timestamp;
int64_t subsecond =
timeparts[TP_SUBSECOND] * power_of_ten(9 - subsecond_precision); // normalize to nanoseconds
if (units == timestamp_units::ms) {
timestamp *= 1000L;
subsecond = subsecond / 1000000L;
} else if (units == timestamp_units::us) {
timestamp *= 1000000L;
subsecond = subsecond / 1000L;
} else if (units == timestamp_units::ns)
timestamp *= 1000000000L;
timestamp += subsecond;
return timestamp;
}
__device__ T operator()(size_type idx)
{
T epoch_time{typename T::duration{0}};
if (d_strings.is_null(idx)) return epoch_time;
string_view d_str = d_strings.element<string_view>(idx);
if (d_str.empty()) return epoch_time;
//
int32_t timeparts[TP_ARRAYSIZE] = {1970, 1, 1}; // month and day are 1-based
if (parse_into_parts(d_str, timeparts)) return epoch_time; // unexpected parse case
//
return T{T::duration(timestamp_from_parts(timeparts, units))};
}
};
// convert cudf type to timestamp units
struct dispatch_timestamp_to_units_fn {
template <typename T>
timestamp_units operator()()
{
CUDF_FAIL("Invalid type for timestamp conversion.");
}
};
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_D>()
{
return timestamp_units::days;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_s>()
{
return timestamp_units::seconds;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ms>()
{
return timestamp_units::ms;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_us>()
{
return timestamp_units::us;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ns>()
{
return timestamp_units::ns;
}
// dispatch operator to map timestamp to native fixed-width-type
struct dispatch_to_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const& d_strings,
std::string const& format,
timestamp_units units,
mutable_column_view& results_view,
cudaStream_t stream) const
{
format_compiler compiler(format.c_str(), units);
auto d_items = compiler.compile_to_device();
auto d_results = results_view.data<T>();
parse_datetime<T> pfn{
d_strings, d_items, compiler.items_count(), units, compiler.subsecond_precision()};
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(results_view.size()),
d_results,
pfn);
}
template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const&,
std::string const&,
timestamp_units,
mutable_column_view&,
cudaStream_t) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings,
data_type timestamp_type,
std::string const& format,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_timestamp_column(timestamp_type, 0);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
timestamp_units units = cudf::type_dispatcher(timestamp_type, dispatch_timestamp_to_units_fn());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
auto results = make_timestamp_column(timestamp_type,
strings_count,
copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto results_view = results->mutable_view();
cudf::type_dispatcher(
timestamp_type, dispatch_to_timestamps_fn(), d_column, format, units, results_view, stream);
results->set_null_count(strings.null_count());
return results;
}
} // namespace detail
// external API
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings,
data_type timestamp_type,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::to_timestamps(strings, timestamp_type, format, cudaStream_t{}, mr);
}
namespace detail {
namespace {
// converts a timestamp into date-time string
template <typename T>
struct datetime_formatter {
const column_device_view d_timestamps;
const format_item* d_format_items;
size_type items_count;
timestamp_units units;
const int32_t* d_offsets;
char* d_chars;
__device__ cudf::timestamp_D::duration convert_to_days(int64_t timestamp, timestamp_units units)
{
using namespace simt::std::chrono;
using minutes = duration<timestamp_s::rep, minutes::period>;
using hours = duration<timestamp_s::rep, hours::period>;
switch (units) {
case timestamp_units::minutes: return floor<days>(minutes(timestamp));
case timestamp_units::seconds: return floor<days>(cudf::timestamp_s::duration(timestamp));
case timestamp_units::hours: return floor<days>(hours(timestamp));
case timestamp_units::ms: return floor<days>(cudf::timestamp_ms::duration(timestamp));
case timestamp_units::us: return floor<days>(cudf::timestamp_us::duration(timestamp));
case timestamp_units::ns: return floor<days>(cudf::timestamp_ns::duration(timestamp));
default: return cudf::timestamp_D::duration(timestamp);
}
}
// divide timestamp integer into time components (year, month, day, etc)
// TODO call the simt::std::chrono methods here instead when they are ready
__device__ void dissect_timestamp(int64_t timestamp, int32_t* timeparts)
{
if (units == timestamp_units::years) {
timeparts[TP_YEAR] = static_cast<int32_t>(timestamp) + 1970;
timeparts[TP_MONTH] = 1;
timeparts[TP_DAY] = 1;
return;
}
// Specialized modulo expression that handles negative values.
// Examples:
// modulo(1,60) 1
// modulo(-1,60) 59
auto modulo_time = [](int64_t time, int64_t base) {
return static_cast<int32_t>(((time % base) + base) % base);
};
// This function handles converting units by dividing and adjusting for negative values.
// Examples:
// scale(-61,60) -2
// scale(-60,60) -1
// scale(-59,60) -1
// scale( 59,60) 0
// scale( 60,60) 1
// scale( 61,60) 1
auto scale_time = [](int64_t time, int64_t base) {
return static_cast<int32_t>((time - ((time < 0) * (base - 1L))) / base);
};
if (units == timestamp_units::months) {
int32_t month = modulo_time(timestamp, 12);
int32_t year = scale_time(timestamp, 12) + 1970;
timeparts[TP_YEAR] = year;
timeparts[TP_MONTH] = month + 1; // months start at 1 and not 0
timeparts[TP_DAY] = 1;
return;
}
// first, convert to days so we can handle months, years, day of the year.
auto const days = convert_to_days(timestamp, units);
auto const ymd = simt::std::chrono::year_month_day(simt::std::chrono::sys_days(days));
auto const year = static_cast<int32_t>(ymd.year());
auto const month = static_cast<unsigned>(ymd.month());
auto const day = static_cast<unsigned>(ymd.day());
int32_t const monthDayOffset[] = {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334};
timeparts[TP_DAY_OF_YEAR] =
day + monthDayOffset[month - 1] + (month > 2 and ymd.year().is_leap());
timeparts[TP_YEAR] = year;
timeparts[TP_MONTH] = month;
timeparts[TP_DAY] = day;
if (units == timestamp_units::days) return;
// done with date, now work on time
if (units == timestamp_units::hours) {
timeparts[TP_HOUR] = modulo_time(timestamp, 24);
return;
}
if (units == timestamp_units::minutes) {
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 60), 24);
timeparts[TP_MINUTE] = modulo_time(timestamp, 60);
return;
}
if (units == timestamp_units::seconds) {
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 3600), 24);
timeparts[TP_MINUTE] = modulo_time(scale_time(timestamp, 60), 60);
timeparts[TP_SECOND] = modulo_time(timestamp, 60);
return;
}
// common utility for setting time components from a subsecond unit value
auto subsecond_fn = [&](int64_t subsecond_base) {
timeparts[TP_SUBSECOND] = modulo_time(timestamp, subsecond_base);
timestamp = timestamp / subsecond_base;
timeparts[TP_HOUR] = modulo_time(scale_time(timestamp, 3600), 24);
timeparts[TP_MINUTE] = modulo_time(scale_time(timestamp, 60), 60);
timeparts[TP_SECOND] = modulo_time(timestamp, 60);
};
if (units == timestamp_units::ms)
subsecond_fn(1000);
else if (units == timestamp_units::us)
subsecond_fn(1000000);
else
subsecond_fn(1000000000);
}
// utility to create 0-padded integers (up to 9 chars)
__device__ char* int2str(char* str, int bytes, int val)
{
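// write val's digits least-significant-first into a zero-filled scratch buffer,
// then copy the low-order `bytes` digits back out in normal order (zero padded)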
char tmpl[9] = {'0', '0', '0', '0', '0', '0', '0', '0', '0'};
char* ptr = tmpl;
while (val > 0) {
int digit = val % 10;
*ptr++ = '0' + digit;
val = val / 10;
}
ptr = tmpl + bytes - 1;
while (bytes-- > 0) *str++ = *ptr--;
return str;
}
__device__ char* format_from_parts(int32_t const* timeparts, char* ptr)
{
for (size_t idx = 0; idx < items_count; ++idx) {
auto item = d_format_items[idx];
if (item.item_type == format_char_type::literal) {
*ptr++ = item.value;
continue;
}
// special logic for each specifier
switch (item.value) {
case 'Y': // 4-digit year
ptr = int2str(ptr, item.length, timeparts[TP_YEAR]);
break;
case 'y': // 2-digit year
{
auto year = timeparts[TP_YEAR];
// remove hundredths digits and above
ptr = int2str(ptr, item.length, year - ((year / 100) * 100));
break;
}
case 'm': // month
ptr = int2str(ptr, item.length, timeparts[TP_MONTH]);
break;
case 'd': // day of month
ptr = int2str(ptr, item.length, timeparts[TP_DAY]);
break;
case 'j': // day of year
ptr = int2str(ptr, item.length, timeparts[TP_DAY_OF_YEAR]);
break;
case 'H': // 24-hour
ptr = int2str(ptr, item.length, timeparts[TP_HOUR]);
break;
case 'I': // 12-hour
{
// 0 = 12am; 12 = 12pm; 6 = 06am; 18 = 06pm
auto hour = timeparts[TP_HOUR];
if (hour == 0) hour = 12;
if (hour > 12) hour -= 12;
ptr = int2str(ptr, item.length, hour);
break;
}
case 'M': // minute
ptr = int2str(ptr, item.length, timeparts[TP_MINUTE]);
break;
case 'S': // second
ptr = int2str(ptr, item.length, timeparts[TP_SECOND]);
break;
case 'f': // sub-second
{
char subsecond_digits[] = "000000000"; // 9 max digits
const int digits = [units = units] {
if (units == timestamp_units::ms) return 3;
if (units == timestamp_units::us) return 6;
if (units == timestamp_units::ns) return 9;
return 0;
}();
int2str(subsecond_digits, digits, timeparts[TP_SUBSECOND]);
ptr = copy_and_increment(ptr, subsecond_digits, item.length);
break;
}
case 'p': // am or pm
// 0 = 12am, 12 = 12pm
if (timeparts[TP_HOUR] < 12)
memcpy(ptr, "AM", 2);
else
memcpy(ptr, "PM", 2);
ptr += 2;
break;
case 'z': // timezone
memcpy(ptr, "+0000", 5); // always UTC
ptr += 5;
break;
case 'Z':
memcpy(ptr, "UTC", 3);
ptr += 3;
break;
default: // ignore everything else
break;
}
}
return ptr;
}
__device__ void operator()(size_type idx)
{
if (d_timestamps.is_null(idx)) return;
auto timestamp = d_timestamps.element<T>(idx);
int32_t timeparts[TP_ARRAYSIZE] = {0};
dissect_timestamp(timestamp.time_since_epoch().count(), timeparts);
// convert to characters
format_from_parts(timeparts, d_chars + d_offsets[idx]);
}
};
//
struct dispatch_from_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const& d_timestamps,
format_item const* d_format_items,
size_type items_count,
timestamp_units units,
const int32_t* d_offsets,
char* d_chars,
cudaStream_t stream) const
{
datetime_formatter<T> pfn{d_timestamps, d_format_items, items_count, units, d_offsets, d_chars};
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
d_timestamps.size(),
pfn);
}
template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const&,
format_item const*,
size_type,
timestamp_units,
const int32_t*,
char* d_chars,
cudaStream_t stream) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = timestamps.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
timestamp_units units =
cudf::type_dispatcher(timestamps.type(), dispatch_timestamp_to_units_fn());
format_compiler compiler(format.c_str(), units);
auto d_format_items = compiler.compile_to_device();
auto column = column_device_view::create(timestamps, stream);
auto d_column = *column;
// copy null mask
rmm::device_buffer null_mask = copy_bitmask(timestamps, stream, mr);
// Each string will be the same number of bytes which can be determined
// directly from the format string.
auto d_str_bytes = compiler.template_bytes(); // size in bytes of each string
// build offsets column
auto offsets_transformer_itr =
thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
[d_column, d_str_bytes] __device__(size_type idx) {
return (d_column.is_null(idx) ? 0 : d_str_bytes);
});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto offsets_view = offsets_column->view();
auto d_new_offsets = offsets_view.template data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_new_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, timestamps.null_count(), bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.template data<char>();
// fill in chars column with timestamps
// dispatcher is called to handle the different timestamp types
cudf::type_dispatcher(timestamps.type(),
dispatch_from_timestamps_fn(),
d_column,
d_format_items,
compiler.items_count(),
units,
d_new_offsets,
d_chars,
stream);
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
timestamps.null_count(),
std::move(null_mask),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::from_timestamps(timestamps, format, cudaStream_t{}, mr);
}
} // namespace strings
} // namespace cudf
|
8d594fec553709c77bb639a74ee4d90ce13183f2.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "headers.h"
int main(int argc, char *argv[])
{
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* declare file pointers */
char trainingLabelFilename[] = "train-labels.txt";
char trainingSetFilename[] = "train-images.txt";
char testSetFilename[] = "t10k-images.txt";
char testLabelFilename[] = "t10k-labels.txt";
#if 0
//used for debugging
char theta1Filename[] = "Theta1.txt";
char theta2Filename[] = "Theta2.txt";
#endif
/* define constants */
int const numFeatures = FEATURE_VECTOR_SIZE;
int const numTrainingExamples = TRAINING_SET_SIZE;
int const numTestExamples = TEST_SET_SIZE;
int const numClasses = NUM_OUTPUT_CLASSES;
floatType_t const eps = 0.12;
/* define the arrays going to be used */
float *trainingVector, *trainingMatrix;
float *theta1, *theta2;
float *testVector, *testMatrix;
int *predictVector;
float learningRate;
int batchSize;
int iterations;
int sizeHiddenLayer;
/* read command line args if they're passed */
readCommandLineArgs( argc, argv, &learningRate, &batchSize, &iterations,
&sizeHiddenLayer );
printf("Number of training examples %d\n",numTrainingExamples);
printf("Number of features/pixels per example %d\n",numFeatures);
printf("Number of test examples %d\n",numTestExamples);
/* malloc trainingVector, which holds the labels of the training set */
trainingVector = (float *) malloc( sizeof(float) * numTrainingExamples );
if( trainingVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( trainingVector, 0, sizeof(float)*numTrainingExamples );
/* read trainingVector from file */
readMatrixFromFile( trainingLabelFilename, trainingVector,
numTrainingExamples, 1, 1 );
/* malloc the training matrix. each column is a different training
example of 784 pixel values
*/
trainingMatrix = (float *) malloc( sizeof(float) * numTrainingExamples *
(numFeatures+1) );
if( trainingMatrix == NULL )
fprintf(stderr,"Houston more problems\n");
memset( trainingMatrix, 0, sizeof(float)*
numTrainingExamples*(numFeatures+1) );
/* read training examples from file as a matrix
read first column of data into second column of array to leave room for
bias unit of ones
*/
// readMatrixFromFile( trainingSetFilename,
// &trainingMatrix[INDX(0,1,numTrainingExamples)],
// numTrainingExamples, numFeatures );
readMatrixFromFile( trainingSetFilename,
&trainingMatrix[1],
numFeatures, numTrainingExamples, numFeatures+1 );
/* scale the training matrix to 0 to 1, essentially a normalization
technique
*/
floatType_t scale = 1.0 / 256.0;
for( int i = 0; i < (numFeatures+1)*numTrainingExamples; i++ )
trainingMatrix[i] *= scale;
/* malloc the theta1 matrix which are the weights for first layer
*/
theta1 = (float *) malloc( sizeof(float) * sizeHiddenLayer *
(numFeatures + 1 ) );
if( theta1 == NULL )
fprintf(stderr,"Houston more problems\n");
memset( theta1, 0, sizeof(float)*sizeHiddenLayer*(numFeatures+1) );
/* init theta1 with random numbers */
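/* each weight is drawn uniformly from [-eps, eps) to break symmetry */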
for( int i = 0; i < sizeHiddenLayer*(numFeatures+1); i++ )
{
theta1[i] = double(rand()) / (double(RAND_MAX) + 1.0);
theta1[i] *= (2.0*eps);
theta1[i] -= eps;
} /* end for */
/* malloc the theta2 matrix which are weights for second layer
*/
theta2 = (float *) malloc( sizeof(float) * numClasses *
(sizeHiddenLayer + 1 ) );
if( theta2 == NULL )
fprintf(stderr,"Houston more problems\n");
memset( theta2, 0, sizeof(float)*numClasses*(sizeHiddenLayer+1) );
/* init theta2 from random numbers */
for( int i = 0; i < numClasses*(sizeHiddenLayer+1); i++ )
{
theta2[i] = double(rand()) / (double(RAND_MAX) + 1.0);
theta2[i] *= (2.0*eps);
theta2[i] -= eps;
} /* end for */
/* setup timers using CUDA events */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
#if 1
/* call the training function. This is a majority of the runtime */
trainNetwork( trainingMatrix, numTrainingExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
trainingVector, learningRate, iterations, batchSize );
#endif
/* report time of training */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
fprintf(stdout, "Total time for training is %.3e sec\n",
elapsedTime/1000.0f );
/* malloc predictVector this is a vector that will be populated by the
predict function, i.e., it will take a set of pixel data and predict
which digit it is, and put those values into a vector */
predictVector = (int *) malloc( sizeof(int) * numTrainingExamples );
if( predictVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( predictVector, 0, sizeof(int)*numTrainingExamples );
/* test prediction on the training examples */
predict( trainingMatrix, numTrainingExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
predictVector );
/* compare the predicted values versus the actual values, of the
training set
*/
floatType_t result = 0.0;
for( int i = 0; i < numTrainingExamples; i++ )
{
if( (int) trainingVector[i] == predictVector[i] )
result += (floatType_t) 1.0;
} /* end for i */
printf("Total correct on training set is %d\n",(int)result);
printf("Prediction rate of training set is %.3f\n",
100.0 * result/(floatType_t)numTrainingExamples);
/* malloc testVector. this is a test set of labels for data
we haven't seen yet.
*/
testVector = (float *) malloc( sizeof(float) * numTestExamples );
if( testVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( testVector, 0, sizeof(float)*numTestExamples );
/* read testVector from file */
readMatrixFromFile( testLabelFilename, testVector,
numTestExamples, 1, 1 );
/* malloc the test matrix. each column is a different test example of data
we haven't seen before.
*/
testMatrix = (float *) malloc( sizeof(float) * numTestExamples *
(numFeatures+1) );
if( testMatrix == NULL )
fprintf(stderr,"Houston more problems\n");
memset( testMatrix, 0, sizeof(float)*
numTestExamples*(numFeatures+1) );
/* read test examples from file as a matrix
read first column of data into second column of array to leave room for
bias unit of ones
*/
readMatrixFromFile( testSetFilename,
&testMatrix[1],
numFeatures, numTestExamples, numFeatures+1 );
/* scale the test matrix to 0 to 1 */
scale = 1.0 / 256.0;
for( int i = 0; i < (numFeatures+1)*numTestExamples; i++ )
testMatrix[i] *= scale;
memset( predictVector, 0, sizeof(int)*numTestExamples );
/* test the prediction of test examples which we haven't trained on
*/
predict( testMatrix, numTestExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
predictVector );
result = 0.0;
for( int i = 0; i < numTestExamples; i++ )
{
if( (int) testVector[i] == predictVector[i] )
result += (floatType_t) 1.0;
} /* end for i */
printf("Total correct on test set is %d\n",(int)result);
printf("Prediction rate of test set is %.3f\n",
100.0 * result/(floatType_t)numTestExamples);
free(trainingVector);
free(trainingMatrix);
free(theta1);
free(theta2);
free(predictVector);
free(testVector);
free(testMatrix);
return 0;
} /* end main */
|
8d594fec553709c77bb639a74ee4d90ce13183f2.cu
|
/*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "headers.h"
int main(int argc, char *argv[])
{
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* declare file pointers */
char trainingLabelFilename[] = "train-labels.txt";
char trainingSetFilename[] = "train-images.txt";
char testSetFilename[] = "t10k-images.txt";
char testLabelFilename[] = "t10k-labels.txt";
#if 0
//used for debugging
char theta1Filename[] = "Theta1.txt";
char theta2Filename[] = "Theta2.txt";
#endif
/* define constants */
int const numFeatures = FEATURE_VECTOR_SIZE;
int const numTrainingExamples = TRAINING_SET_SIZE;
int const numTestExamples = TEST_SET_SIZE;
int const numClasses = NUM_OUTPUT_CLASSES;
floatType_t const eps = 0.12;
/* define the arrays going to be used */
float *trainingVector, *trainingMatrix;
float *theta1, *theta2;
float *testVector, *testMatrix;
int *predictVector;
float learningRate;
int batchSize;
int iterations;
int sizeHiddenLayer;
/* read command line args if they're passed */
readCommandLineArgs( argc, argv, &learningRate, &batchSize, &iterations,
&sizeHiddenLayer );
printf("Number of training examples %d\n",numTrainingExamples);
printf("Number of features/pixels per example %d\n",numFeatures);
printf("Number of test examples %d\n",numTestExamples);
/* malloc trainingVector, which holds the labels of the training set */
trainingVector = (float *) malloc( sizeof(float) * numTrainingExamples );
if( trainingVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( trainingVector, 0, sizeof(float)*numTrainingExamples );
/* read trainingVector from file */
readMatrixFromFile( trainingLabelFilename, trainingVector,
numTrainingExamples, 1, 1 );
/* malloc the training matrix. each column is a different training
example of 784 pixel values
*/
trainingMatrix = (float *) malloc( sizeof(float) * numTrainingExamples *
(numFeatures+1) );
if( trainingMatrix == NULL )
fprintf(stderr,"Houston more problems\n");
memset( trainingMatrix, 0, sizeof(float)*
numTrainingExamples*(numFeatures+1) );
/* read training examples from file as a matrix
read first column of data into second column of array to leave room for
bias unit of ones
*/
// readMatrixFromFile( trainingSetFilename,
// &trainingMatrix[INDX(0,1,numTrainingExamples)],
// numTrainingExamples, numFeatures );
readMatrixFromFile( trainingSetFilename,
&trainingMatrix[1],
numFeatures, numTrainingExamples, numFeatures+1 );
/* scale the training matrix to 0 to 1, essentially a normalization
technique
*/
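/* pixel intensities in the input files appear to be 0-255 values, so
   multiplying by 1/256 maps them into [0,1) */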
floatType_t scale = 1.0 / 256.0;
for( int i = 0; i < (numFeatures+1)*numTrainingExamples; i++ )
trainingMatrix[i] *= scale;
/* malloc the theta1 matrix which are the weights for first layer
*/
theta1 = (float *) malloc( sizeof(float) * sizeHiddenLayer *
(numFeatures + 1 ) );
if( theta1 == NULL )
fprintf(stderr,"Houston more problems\n");
memset( theta1, 0, sizeof(float)*sizeHiddenLayer*(numFeatures+1) );
/* init theta1 with random numbers */
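/* rand()/(RAND_MAX+1) is uniform in [0,1); scaling by 2*eps and subtracting eps
   gives initial weights uniformly distributed in [-eps, eps) */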
for( int i = 0; i < sizeHiddenLayer*(numFeatures+1); i++ )
{
theta1[i] = double(rand()) / (double(RAND_MAX) + 1.0);
theta1[i] *= (2.0*eps);
theta1[i] -= eps;
} /* end for */
/* malloc the theta2 matrix which are weights for second layer
*/
theta2 = (float *) malloc( sizeof(float) * numClasses *
(sizeHiddenLayer + 1 ) );
if( theta2 == NULL )
fprintf(stderr,"Houston more problems\n");
memset( theta2, 0, sizeof(float)*numClasses*(sizeHiddenLayer+1) );
/* init theta2 from random numbers */
for( int i = 0; i < numClasses*(sizeHiddenLayer+1); i++ )
{
theta2[i] = double(rand()) / (double(RAND_MAX) + 1.0);
theta2[i] *= (2.0*eps);
theta2[i] -= eps;
} /* end for */
/* setup timers using CUDA events */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
#if 1
/* call the training function. This is a majority of the runtime */
trainNetwork( trainingMatrix, numTrainingExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
trainingVector, learningRate, iterations, batchSize );
#endif
/* report time of training */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
fprintf(stdout, "Total time for training is %.3e sec\n",
elapsedTime/1000.0f );
/* malloc predictVector. this is a vector that will be populated by the
predict function, i.e., it will take a set of pixel data and predict
which digit it is, and put those values into a vector */
predictVector = (int *) malloc( sizeof(int) * numTrainingExamples );
if( predictVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( predictVector, 0, sizeof(int)*numTrainingExamples );
/* test prediction on the training examples */
predict( trainingMatrix, numTrainingExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
predictVector );
/* compare the predicted values versus the actual values, of the
training set
*/
floatType_t result = 0.0;
for( int i = 0; i < numTrainingExamples; i++ )
{
if( (int) trainingVector[i] == predictVector[i] )
result += (floatType_t) 1.0;
} /* end for i */
printf("Total correct on training set is %d\n",(int)result);
printf("Prediction rate of training set is %.3f\n",
100.0 * result/(floatType_t)numTrainingExamples);
/* malloc testVector. this is a test set of labels for data
we haven't seen yet.
*/
testVector = (float *) malloc( sizeof(float) * numTestExamples );
if( testVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( testVector, 0, sizeof(float)*numTestExamples );
/* read testVector from file */
readMatrixFromFile( testLabelFilename, testVector,
numTestExamples, 1, 1 );
/* malloc the test matrix. each column is a different test example of data
we haven't seen before.
*/
testMatrix = (float *) malloc( sizeof(float) * numTestExamples *
(numFeatures+1) );
if( testMatrix == NULL )
fprintf(stderr,"Houston more problems\n");
memset( testMatrix, 0, sizeof(float)*
numTestExamples*(numFeatures+1) );
/* read test examples from file as a matrix
read first column of data into second column of array to leave room for
bias unit of ones
*/
readMatrixFromFile( testSetFilename,
&testMatrix[1],
numFeatures, numTestExamples, numFeatures+1 );
/* scale the test matrix to 0 to 1 */
scale = 1.0 / 256.0;
for( int i = 0; i < (numFeatures+1)*numTestExamples; i++ )
testMatrix[i] *= scale;
memset( predictVector, 0, sizeof(int)*numTestExamples );
/* test the prediction of test examples which we haven't trained on
*/
predict( testMatrix, numTestExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
predictVector );
result = 0.0;
for( int i = 0; i < numTestExamples; i++ )
{
if( (int) testVector[i] == predictVector[i] )
result += (floatType_t) 1.0;
} /* end for i */
printf("Total correct on test set is %d\n",(int)result);
printf("Prediction rate of test set is %.3f\n",
100.0 * result/(floatType_t)numTestExamples);
free(trainingVector);
free(trainingMatrix);
free(theta1);
free(theta2);
free(predictVector);
free(testVector);
free(testMatrix);
return 0;
} /* end main */
|
b3d91a01f8d3921a5ec20950ba2783f81fecf197.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include "device_funcs.cuh"
#include <helper_math.h>
#define TX_2D 32
#define TY_2D 32
#define TX 8
#define TY 8
#define TZ 8
#define NUMSTEPS 20
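// For a sample point pos, returns a float2 holding {distance to the nearest of
// the len atoms, sum over all atoms of charge[j]/dist_j^2}.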
__device__
float2 minDist(float3 *coord, float3 pos, int len, float *d_charge) {
float atomDist = 100.f;
float pcharge = 0.f;
for (int j = 0; j<len; j = j+1){
float dist = sqrtf((coord[j].x - pos.x)*(coord[j].x - pos.x) + (coord[j].y - pos.y)*(coord[j].y - pos.y) +
(coord[j].z - pos.z)*(coord[j].z - pos.z));
pcharge = pcharge + d_charge[j]/(dist*dist);
atomDist=(dist<atomDist)?dist:atomDist;
}
float2 data = {atomDist, pcharge};
return data;
}
__global__
void renderKernel(uchar4 *d_out, float2 *d_vol, int w, int h,
int3 volSize, int method, float zs, float theta, float threshold,
float dist, int id) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r * w;
if ((c >= w) || (r >= h)) return; // Check if within image bounds
const uchar4 background = { 145, 123, 76, 0 };
float3 source = { 0.f, 0.f, -zs };
float3 pix = scrIdxToPos(c, r, w, h, 2 * volSize.z - zs);
// apply viewing transformation: here rotate about y-axis
source = yRotate(source, theta);
pix = yRotate(pix, theta);
// prepare inputs for ray-box intersection
float t0, t1;
const Ray pixRay = {source, pix - source};
float3 center = {volSize.x/2.f, volSize.y/2.f, volSize.z/2.f};
const float3 boxmin = -center;
const float3 boxmax = {volSize.x - center.x, volSize.y - center.y,
volSize.z - center.z};
// perform ray-box intersection test
const bool hitBox = intersectBox(pixRay, boxmin, boxmax, &t0, &t1);
//printf("%d\n", hitBox);
uchar4 shade;
if (!hitBox) shade = background; //miss box => background color
else {
if (t0 < 0.0f) t0 = 0.f; // clamp to 0 to avoid looking backward
// bounded by points where the ray enters and leaves the box
const Ray boxRay = { paramRay(pixRay, t0),
paramRay(pixRay, t1) - paramRay(pixRay, t0) };
if (method == 1) shade =
sliceShader(d_vol, volSize, boxRay, threshold, dist, source, id);
else shade =
rayCastShader(d_vol, volSize, boxRay, threshold);
}
d_out[i] = shade;
}
__global__
void distanceKernel(float2 *d_vol, float3 *d_coords, int3 volSize, float3 voxDim,
float3 boxMin, int len, float *d_charge) {
const int w = volSize.x, h = volSize.y, d = volSize.z;
const int c = blockIdx.x * blockDim.x + threadIdx.x; // column
const int r = blockIdx.y * blockDim.y + threadIdx.y; // row
const int s = blockIdx.z * blockDim.z + threadIdx.z; // stack
const float3 voxelPos = { boxMin.x + (c*voxDim.x)+voxDim.x/2.f, boxMin.y + (r*voxDim.y)+voxDim.y/2.f,
boxMin.z + (s*voxDim.z)+voxDim.z/2.f };
const int i = c + r*w + s*w*h;
if ((c >= w) || (r >= h) || (s >= d)) return;
d_vol[i] = minDist(d_coords, voxelPos, len, d_charge); // compute and store result
}
void kernelLauncher(uchar4 *d_out, float2 *d_vol, int w, int h,
int3 volSize, int method, int zs, float theta, float threshold,
float dist, int id) {
dim3 blockSize(TX_2D, TY_2D);
dim3 gridSize(divUp(w, TX_2D), divUp(h, TY_2D));
hipLaunchKernelGGL(( renderKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, d_vol, w, h, volSize,
method, zs, theta, threshold, dist, id);
}
void volumeKernelLauncher(float2 *d_vol, float3 *d_coords, int3 volSize,
float3 voxDim, float3 boxMin, int atomCount, float *d_charge) {
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(volSize.x, TX), divUp(volSize.y, TY), divUp(volSize.z, TZ));
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_vol, d_coords, volSize, voxDim, boxMin, atomCount, d_charge);
}
|
b3d91a01f8d3921a5ec20950ba2783f81fecf197.cu
|
#include "kernel.h"
#include "device_funcs.cuh"
#include <helper_math.h>
#define TX_2D 32
#define TY_2D 32
#define TX 8
#define TY 8
#define TZ 8
#define NUMSTEPS 20
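// For a sample point pos, returns a float2 holding {distance to the nearest of
// the len atoms, sum over all atoms of charge[j]/dist_j^2}.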
__device__
float2 minDist(float3 *coord, float3 pos, int len, float *d_charge) {
float atomDist = 100.f;
float pcharge = 0.f;
for (int j = 0; j<len; j = j+1){
float dist = sqrtf((coord[j].x - pos.x)*(coord[j].x - pos.x) + (coord[j].y - pos.y)*(coord[j].y - pos.y) +
(coord[j].z - pos.z)*(coord[j].z - pos.z));
pcharge = pcharge + d_charge[j]/(dist*dist);
atomDist=(dist<atomDist)?dist:atomDist;
}
float2 data = {atomDist, pcharge};
return data;
}
__global__
void renderKernel(uchar4 *d_out, float2 *d_vol, int w, int h,
int3 volSize, int method, float zs, float theta, float threshold,
float dist, int id) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r * w;
if ((c >= w) || (r >= h)) return; // Check if within image bounds
const uchar4 background = { 145, 123, 76, 0 };
float3 source = { 0.f, 0.f, -zs };
float3 pix = scrIdxToPos(c, r, w, h, 2 * volSize.z - zs);
// apply viewing transformation: here rotate about y-axis
source = yRotate(source, theta);
pix = yRotate(pix, theta);
// prepare inputs for ray-box intersection
float t0, t1;
const Ray pixRay = {source, pix - source};
float3 center = {volSize.x/2.f, volSize.y/2.f, volSize.z/2.f};
const float3 boxmin = -center;
const float3 boxmax = {volSize.x - center.x, volSize.y - center.y,
volSize.z - center.z};
// perform ray-box intersection test
const bool hitBox = intersectBox(pixRay, boxmin, boxmax, &t0, &t1);
//printf("%d\n", hitBox);
uchar4 shade;
if (!hitBox) shade = background; //miss box => background color
else {
if (t0 < 0.0f) t0 = 0.f; // clamp to 0 to avoid looking backward
// bounded by points where the ray enters and leaves the box
const Ray boxRay = { paramRay(pixRay, t0),
paramRay(pixRay, t1) - paramRay(pixRay, t0) };
if (method == 1) shade =
sliceShader(d_vol, volSize, boxRay, threshold, dist, source, id);
else shade =
rayCastShader(d_vol, volSize, boxRay, threshold);
}
d_out[i] = shade;
}
__global__
void distanceKernel(float2 *d_vol, float3 *d_coords, int3 volSize, float3 voxDim,
float3 boxMin, int len, float *d_charge) {
const int w = volSize.x, h = volSize.y, d = volSize.z;
const int c = blockIdx.x * blockDim.x + threadIdx.x; // column
const int r = blockIdx.y * blockDim.y + threadIdx.y; // row
const int s = blockIdx.z * blockDim.z + threadIdx.z; // stack
const float3 voxelPos = { boxMin.x + (c*voxDim.x)+voxDim.x/2.f, boxMin.y + (r*voxDim.y)+voxDim.y/2.f,
boxMin.z + (s*voxDim.z)+voxDim.z/2.f };
const int i = c + r*w + s*w*h;
if ((c >= w) || (r >= h) || (s >= d)) return;
d_vol[i] = minDist(d_coords, voxelPos, len, d_charge); // compute and store result
}
void kernelLauncher(uchar4 *d_out, float2 *d_vol, int w, int h,
int3 volSize, int method, int zs, float theta, float threshold,
float dist, int id) {
dim3 blockSize(TX_2D, TY_2D);
dim3 gridSize(divUp(w, TX_2D), divUp(h, TY_2D));
renderKernel<<<gridSize, blockSize>>>(d_out, d_vol, w, h, volSize,
method, zs, theta, threshold, dist, id);
}
void volumeKernelLauncher(float2 *d_vol, float3 *d_coords, int3 volSize,
float3 voxDim, float3 boxMin, int atomCount, float *d_charge) {
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(volSize.x, TX), divUp(volSize.y, TY), divUp(volSize.z, TZ));
distanceKernel<<<gridSize, blockSize>>>(d_vol, d_coords, volSize, voxDim, boxMin, atomCount, d_charge);
}
|
f26a0a7b2c3fa376d1f73aea8dc63c4d8e70b277.hip
|
// !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>
#include "nccl.h"
#include "test_utilities.h"
template<typename T>
void RunTest(T** buff, const int N, const ncclDataType_t type, const int root,
ncclComm_t* const comms, const std::vector<int>& dList) {
// initialize data
int nDev = 0;
NCCLCHECK(ncclCommCount(comms[0], &nDev));
hipStream_t* s = (hipStream_t*)malloc(sizeof(hipStream_t)*nDev);
T* buffer = (T*)malloc(N * sizeof(T));
T* result = (T*)malloc(N * sizeof(T));
memset(result, 0, N * sizeof(T));
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamCreate(s+i));
if (i == root) {
Randomize(buff[root], N, root);
CUDACHECK(hipMemcpy(result, buff[root], N * sizeof(T),
hipMemcpyDeviceToHost));
} else {
CUDACHECK(hipMemset(buff[i], 0, N * sizeof(T)));
}
CUDACHECK(hipDeviceSynchronize());
}
// warm up GPU
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
NCCLCHECK(ncclBcast((void*)buff[i], ::min(32 * 1024, N), type, root, comms[i], s[i]));
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamSynchronize(s[i]));
}
// for (int n = 1; n <= N; n = n << 1)
{
int n = N;
printf("%12i %12i %6s %4i", (int)(n * sizeof(T)), n,
TypeName(type).c_str(), root);
auto start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
NCCLCHECK(ncclBcast((void*)buff[i], n, type, root, comms[i], s[i]));
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamSynchronize(s[i]));
}
auto stop = std::chrono::high_resolution_clock::now();
double elapsedSec =
std::chrono::duration_cast<std::chrono::duration<double>>(
stop - start).count();
double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec;
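// algbw is in GB/s; for broadcast this test reports bus bandwidth equal to the algorithmic bandwidth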
double busbw = algbw;
double maxDelta = 0.0;
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
double tmpDelta = CheckDelta<T>(buff[i], result, n);
maxDelta = ::max(tmpDelta, maxDelta);
}
printf(" %7.3f %5.2f %5.2f %7.0le\n", elapsedSec * 1.0E3, algbw, busbw,
maxDelta);
}
for(int i=0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamDestroy(s[i]));
}
free(s);
free(buffer);
free(result);
}
template<typename T>
void RunTests(const int N, const ncclDataType_t type, ncclComm_t* const comms,
const std::vector<int>& dList) {
int nDev = 0;
NCCLCHECK(ncclCommCount(comms[0], &nDev));
T** buff = (T**)malloc(nDev * sizeof(T*));
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipMalloc(buff + i, N * sizeof(T)));
}
//for (int root = 1; root < 2; ++root) {
for (int root = 0; root < nDev; ++root) {
RunTest<T>(buff, N, type, root, comms, dList);
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipFree(buff[i]));
}
free(buff);
}
void usage() {
printf("Tests nccl Broadcast with user supplied arguments.\n"
" Usage: broadcast_test <data size in bytes> [number of GPUs] "
"[GPU 0] [GPU 1] ...\n\n");
}
int main(int argc, char* argv[]) {
int nVis = 0;
CUDACHECK(hipGetDeviceCount(&nVis));
unsigned long long N = 0;
if (argc > 1) {
int t = sscanf(argv[1], "%llu", &N);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
} else {
printf("Error: must specify at least data size in bytes!\n\n");
usage();
exit(EXIT_FAILURE);
}
int nDev = nVis;
if (argc > 2) {
int t = sscanf(argv[2], "%d", &nDev);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
}
std::vector<int> dList(nDev);
for (int i = 0; i < nDev; ++i)
dList[i] = i % nVis;
if (argc > 3) {
if (argc - 3 != nDev) {
printf("Error: insufficient number of GPUs in list\n\n");
usage();
exit(EXIT_FAILURE);
}
for (int i = 0; i < nDev; ++i) {
int t = sscanf(argv[3 + i], "%d", dList.data() + i);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[2 + i]);
usage();
exit(EXIT_FAILURE);
}
}
}
ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nDev);
NCCLCHECK(ncclCommInitAll(comms, nDev, dList.data()));
printf("# Using devices\n");
for (int g = 0; g < nDev; ++g) {
int cudaDev;
int rank;
hipDeviceProp_t prop;
NCCLCHECK(ncclCommCuDevice(comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(comms[g], &rank));
CUDACHECK(hipGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name);
}
printf("\n");
printf("# %10s %12s %6s %4s %7s %5s %5s %7s\n",
"bytes", "N", "type", "root", "time", "algbw", "busbw", "delta");
RunTests<char>(N / sizeof(char), ncclChar, comms, dList);
RunTests<int>(N / sizeof(int), ncclInt, comms, dList);
#ifdef CUDA_HAS_HALF
RunTests<half>(N / sizeof(half), ncclHalf, comms, dList);
#endif
RunTests<float>(N / sizeof(float), ncclFloat, comms, dList);
RunTests<double>(N / sizeof(double), ncclDouble, comms, dList);
RunTests<long long>(N / sizeof(long long), ncclInt64, comms, dList);
RunTests<unsigned long long>(N / sizeof(unsigned long long), ncclUint64, comms, dList);
printf("\n");
for(int i = 0; i < nDev; ++i)
ncclCommDestroy(comms[i]);
free(comms);
exit(EXIT_SUCCESS);
}
|
f26a0a7b2c3fa376d1f73aea8dc63c4d8e70b277.cu
|
/*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>
#include "nccl.h"
#include "test_utilities.h"
template<typename T>
void RunTest(T** buff, const int N, const ncclDataType_t type, const int root,
ncclComm_t* const comms, const std::vector<int>& dList) {
// initialize data
int nDev = 0;
NCCLCHECK(ncclCommCount(comms[0], &nDev));
cudaStream_t* s = (cudaStream_t*)malloc(sizeof(cudaStream_t)*nDev);
T* buffer = (T*)malloc(N * sizeof(T));
T* result = (T*)malloc(N * sizeof(T));
memset(result, 0, N * sizeof(T));
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamCreate(s+i));
if (i == root) {
Randomize(buff[root], N, root);
CUDACHECK(cudaMemcpy(result, buff[root], N * sizeof(T),
cudaMemcpyDeviceToHost));
} else {
CUDACHECK(cudaMemset(buff[i], 0, N * sizeof(T)));
}
CUDACHECK(cudaDeviceSynchronize());
}
// warm up GPU
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
NCCLCHECK(ncclBcast((void*)buff[i], std::min(32 * 1024, N), type, root, comms[i], s[i]));
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamSynchronize(s[i]));
}
// for (int n = 1; n <= N; n = n << 1)
{
int n = N;
printf("%12i %12i %6s %4i", (int)(n * sizeof(T)), n,
TypeName(type).c_str(), root);
auto start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
NCCLCHECK(ncclBcast((void*)buff[i], n, type, root, comms[i], s[i]));
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamSynchronize(s[i]));
}
auto stop = std::chrono::high_resolution_clock::now();
double elapsedSec =
std::chrono::duration_cast<std::chrono::duration<double>>(
stop - start).count();
double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec;
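// algbw is in GB/s; for broadcast this test reports bus bandwidth equal to the algorithmic bandwidth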
double busbw = algbw;
double maxDelta = 0.0;
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
double tmpDelta = CheckDelta<T>(buff[i], result, n);
maxDelta = std::max(tmpDelta, maxDelta);
}
printf(" %7.3f %5.2f %5.2f %7.0le\n", elapsedSec * 1.0E3, algbw, busbw,
maxDelta);
}
for(int i=0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamDestroy(s[i]));
}
free(s);
free(buffer);
free(result);
}
template<typename T>
void RunTests(const int N, const ncclDataType_t type, ncclComm_t* const comms,
const std::vector<int>& dList) {
int nDev = 0;
NCCLCHECK(ncclCommCount(comms[0], &nDev));
T** buff = (T**)malloc(nDev * sizeof(T*));
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaMalloc(buff + i, N * sizeof(T)));
}
//for (int root = 1; root < 2; ++root) {
for (int root = 0; root < nDev; ++root) {
RunTest<T>(buff, N, type, root, comms, dList);
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaFree(buff[i]));
}
free(buff);
}
void usage() {
printf("Tests nccl Broadcast with user supplied arguments.\n"
" Usage: broadcast_test <data size in bytes> [number of GPUs] "
"[GPU 0] [GPU 1] ...\n\n");
}
int main(int argc, char* argv[]) {
int nVis = 0;
CUDACHECK(cudaGetDeviceCount(&nVis));
unsigned long long N = 0;
if (argc > 1) {
int t = sscanf(argv[1], "%llu", &N);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
} else {
printf("Error: must specify at least data size in bytes!\n\n");
usage();
exit(EXIT_FAILURE);
}
int nDev = nVis;
if (argc > 2) {
int t = sscanf(argv[2], "%d", &nDev);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
}
std::vector<int> dList(nDev);
for (int i = 0; i < nDev; ++i)
dList[i] = i % nVis;
if (argc > 3) {
if (argc - 3 != nDev) {
printf("Error: insufficient number of GPUs in list\n\n");
usage();
exit(EXIT_FAILURE);
}
for (int i = 0; i < nDev; ++i) {
int t = sscanf(argv[3 + i], "%d", dList.data() + i);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[2 + i]);
usage();
exit(EXIT_FAILURE);
}
}
}
ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nDev);
NCCLCHECK(ncclCommInitAll(comms, nDev, dList.data()));
printf("# Using devices\n");
for (int g = 0; g < nDev; ++g) {
int cudaDev;
int rank;
cudaDeviceProp prop;
NCCLCHECK(ncclCommCuDevice(comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(comms[g], &rank));
CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name);
}
printf("\n");
printf("# %10s %12s %6s %4s %7s %5s %5s %7s\n",
"bytes", "N", "type", "root", "time", "algbw", "busbw", "delta");
RunTests<char>(N / sizeof(char), ncclChar, comms, dList);
RunTests<int>(N / sizeof(int), ncclInt, comms, dList);
#ifdef CUDA_HAS_HALF
RunTests<half>(N / sizeof(half), ncclHalf, comms, dList);
#endif
RunTests<float>(N / sizeof(float), ncclFloat, comms, dList);
RunTests<double>(N / sizeof(double), ncclDouble, comms, dList);
RunTests<long long>(N / sizeof(long long), ncclInt64, comms, dList);
RunTests<unsigned long long>(N / sizeof(unsigned long long), ncclUint64, comms, dList);
printf("\n");
for(int i = 0; i < nDev; ++i)
ncclCommDestroy(comms[i]);
free(comms);
exit(EXIT_SUCCESS);
}
|
232ff3bf990f9bbee8355c91e89bcc47d8e4099c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Matrix Multiplication in gpu with 1D grid of blocks and 1D block shape.
// Example run: https://imgur.com/DHGl22F
// Compile with: nvcc -o test matrix_multiplication_1D.cu -std=c++11
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <chrono>
// Multiplies matrices using GPU with 1D grid
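// Each thread computes one output column ix: it loops over every row j and
// accumulates the dot product over the inner dimension k.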
__global__ void multiply_matrix_gpu(long *matA, long *matB, long *matC, const int n) {
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < n) {
for(int j=0; j<n; j++) {
for(int k=0; k<n; k++) {
matC[j*n+ix] += matA[j*n+k] * matB[k*n+ix];
}
}
}
}
// Multiplies matrices in host
void multiply_matrix_host(long *matA, long *matB, long *matC, int n) {
for(int i = 0; i<n; i++) {
for(int j=0; j<n; j++) {
for(int k=0; k<n; k++) {
matC[i*n+j] += matA[i*n+k] * matB[j+k*n];
}
}
}
}
// Compares two matrices
void checkResult(long *hostRef, long *gpuRef, const int n) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < n*n; i++) {
if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
match = 0;
printf("host %ld gpu %ld\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match) printf("Matrix match.\n\n");
else printf("Matrix does not not match.\n\n");
}
int main(int argc, char* argv[]) {
// Set up device
int dev = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("Using Device %d: %s\n", dev, deviceProp.name);
hipSetDevice(dev);
// Size of matrix
int n = 1000;
int bytes = n * n * sizeof(long);
// Host matrix memory
long *h_a = (long *)malloc(bytes);
long *h_b = (long *)malloc(bytes);
// Results
long *hostRef = (long *)malloc(bytes);
long *gpuRef = (long *)malloc(bytes);
// Initialize matrix on host
for(int i = 0; i < n*n; i++ ) {
h_a[i] = i+1;
h_b[i] = i+1;
}
// Initialize matrix with 0s
memset(hostRef, 0, bytes);
memset(gpuRef, 0, bytes);
// Multiply matrix on host
auto start_cpu = std::chrono::high_resolution_clock::now();
multiply_matrix_host(h_a, h_b, hostRef, n);
auto end_cpu = std::chrono::high_resolution_clock::now();
// Measure total time in host
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("multiply_matrix_host elapsed %f ms\n", duration_ms.count());
// Device matrix global memory
long *d_a, *d_b, *d_c;
hipMalloc((void **)&d_a, bytes);
hipMalloc((void **)&d_b, bytes);
hipMalloc((void **)&d_c, bytes);
// Transfer data from host to device
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
hipMemset(d_c, 0, bytes); // Initialize matrix with 0s
// Kernel execution configuration
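// one thread per matrix column: grid.x = ceil(n / block.x) blocks of 128 threads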
dim3 block(128);
dim3 grid((n + block.x - 1) / block.x);
printf("grid.x %d block.x %d \n", grid.x, block.x);
// Execute kernel
start_cpu = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( multiply_matrix_gpu), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, n);
hipDeviceSynchronize();
end_cpu = std::chrono::high_resolution_clock::now();
// Measure total time in GPU
duration_ms = end_cpu - start_cpu;
printf("multiply_matrix_gpu elapsed %f ms\n", duration_ms.count());
// Copy result from device to host
hipMemcpy(gpuRef, d_c, bytes, hipMemcpyDeviceToHost);
// Check results
checkResult(hostRef, gpuRef, n);
// Free memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(hostRef);
free(gpuRef);
hipDeviceReset();
return 0;
}
|
232ff3bf990f9bbee8355c91e89bcc47d8e4099c.cu
|
// Matrix Multiplication in gpu with 1D grid of blocks and 1D block shape.
// Example run: https://imgur.com/DHGl22F
// Compile with: nvcc -o test matrix_multiplication_1D.cu -std=c++11
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <chrono>
// Multiplies matrices using GPU with 1D grid
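// Each thread computes one output column ix: it loops over every row j and
// accumulates the dot product over the inner dimension k.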
__global__ void multiply_matrix_gpu(long *matA, long *matB, long *matC, const int n) {
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < n) {
for(int j=0; j<n; j++) {
for(int k=0; k<n; k++) {
matC[j*n+ix] += matA[j*n+k] * matB[k*n+ix];
}
}
}
}
// Multiplies matrices in host
void multiply_matrix_host(long *matA, long *matB, long *matC, int n) {
for(int i = 0; i<n; i++) {
for(int j=0; j<n; j++) {
for(int k=0; k<n; k++) {
matC[i*n+j] += matA[i*n+k] * matB[j+k*n];
}
}
}
}
// Compares two matrices
void checkResult(long *hostRef, long *gpuRef, const int n) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < n*n; i++) {
if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
match = 0;
printf("host %ld gpu %ld\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match) printf("Matrix match.\n\n");
else printf("Matrix does not not match.\n\n");
}
int main(int argc, char* argv[]) {
// Set up device
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("Using Device %d: %s\n", dev, deviceProp.name);
cudaSetDevice(dev);
// Size of matrix
int n = 1000;
int bytes = n * n * sizeof(long);
// Host matrix memory
long *h_a = (long *)malloc(bytes);
long *h_b = (long *)malloc(bytes);
// Results
long *hostRef = (long *)malloc(bytes);
long *gpuRef = (long *)malloc(bytes);
// Initialize matrix on host
for(int i = 0; i < n*n; i++ ) {
h_a[i] = i+1;
h_b[i] = i+1;
}
// Initialize matrix with 0s
memset(hostRef, 0, bytes);
memset(gpuRef, 0, bytes);
// Multiply matrix on host
auto start_cpu = std::chrono::high_resolution_clock::now();
multiply_matrix_host(h_a, h_b, hostRef, n);
auto end_cpu = std::chrono::high_resolution_clock::now();
// Measure total time in host
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("multiply_matrix_host elapsed %f ms\n", duration_ms.count());
// Device matrix global memory
long *d_a, *d_b, *d_c;
cudaMalloc((void **)&d_a, bytes);
cudaMalloc((void **)&d_b, bytes);
cudaMalloc((void **)&d_c, bytes);
// Transfer data from host to device
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
cudaMemset(d_c, 0, bytes); // Initialize matrix with 0s
// Kernel execution configuration
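// one thread per matrix column: grid.x = ceil(n / block.x) blocks of 128 threads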
dim3 block(128);
dim3 grid((n + block.x - 1) / block.x);
printf("grid.x %d block.x %d \n", grid.x, block.x);
// Execute kernel
start_cpu = std::chrono::high_resolution_clock::now();
multiply_matrix_gpu<<<grid, block>>>(d_a, d_b, d_c, n);
cudaDeviceSynchronize();
end_cpu = std::chrono::high_resolution_clock::now();
// Measure total time in GPU
duration_ms = end_cpu - start_cpu;
printf("multiply_matrix_gpu elapsed %f ms\n", duration_ms.count());
// Copy result from device to host
cudaMemcpy(gpuRef, d_c, bytes, cudaMemcpyDeviceToHost);
// Check results
checkResult(hostRef, gpuRef, n);
// Free memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(hostRef);
free(gpuRef);
cudaDeviceReset();
return 0;
}
|
30a0e77f16c6155766b318e6f1fc4ac9a0122e48.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matrixDivisionScalar(double *a, double b, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = a[y * cc + x]/b;
}
}
|
30a0e77f16c6155766b318e6f1fc4ac9a0122e48.cu
|
#include "includes.h"
__global__ void matrixDivisionScalar(double *a, double b, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = a[y * cc + x]/b;
}
}
|
529d99141cb96e8c80ade6a1e811317afbfa3c40.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/fvm_types.hpp>
#include <arbor/gpu/gpu_api.hpp>
#include <arbor/gpu/gpu_common.hpp>
#include "matrix_common.hpp"
#include "matrix_fine.hpp"
namespace arb {
namespace gpu {
namespace kernels {
/// GPU implementation of Hines matrix assembly.
/// Fine layout.
/// For a given time step size dt:
/// - use the precomputed alpha and alpha_d values to construct the diagonal
/// and off diagonal of the symmetric Hines matrix.
/// - compute the RHS of the linear system to solve.
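/// In terms of the arrays passed to the kernel this amounts to (with p = perm[i]):
///   g_i    = 1e-3*cv_capacitance[i]/dt + 1e-3*area[i]*conductivity[i]
///   d[p]   = g_i + invariant_d[i]
///   rhs[p] = g_i*voltage[i] - 1e-3*area[i]*current[i]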
template <typename T, typename I>
__global__
void assemble_matrix_fine(
T* __restrict__ const d,
T* __restrict__ const rhs,
const T* __restrict__ const invariant_d,
const T* __restrict__ const voltage,
const T* __restrict__ const current,
const T* __restrict__ const conductivity,
const T* __restrict__ const cv_capacitance,
const T* __restrict__ const area,
const T dt,
const I* __restrict__ const perm,
unsigned n)
{
const unsigned tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < n) {
// The 1e-3 is a constant of proportionality required to ensure that the
// conductance (gi) values have units μS (micro-Siemens).
// See the model documentation in docs/model for more information.
const auto pid = perm[tid];
const auto area_factor = T(1e-3)*area[tid];
const auto gi = T(1e-3)*cv_capacitance[tid]/dt + area_factor*conductivity[tid];
d[pid] = gi + invariant_d[tid];
rhs[pid] = gi*voltage[tid] - area_factor*current[tid];
}
}
/// GPU implementation of Hines Matrix solver.
/// Fine-grained tree based solver.
/// Each block solves a set of matrices by iterating over the levels of the matrix
/// and performing a backward and forward substitution. On each level one thread
/// gets assigned to one branch on this level of a matrix and solves and
/// performs the substitution. Afterwards all threads continue on the next
/// level.
/// To avoid idle threads, one should try to ensure that on each level there is a
/// similar number of branches.
template <typename T>
__global__
void solve_matrix_fine(
T* __restrict__ const rhs,
T* __restrict__ const d,
const T* __restrict__ const u,
const level_metadata* __restrict__ const level_meta,
const arb_index_type* __restrict__ const level_lengths,
const arb_index_type* __restrict__ const level_parents,
const arb_index_type* __restrict__ const block_index,
const arb_index_type* __restrict__ const num_matrix) // number of packed matrices = number of cells
{
const auto tid = threadIdx.x;
const auto bid = blockIdx.x;
const auto first_level = block_index[bid];
const auto num_levels = block_index[bid + 1] - first_level;
const auto block_level_meta = &level_meta[first_level];
// backward substitution
for (unsigned l=0; l<num_levels-1; ++l) {
// Metadata for this level and the next level
const auto& lvl_meta = block_level_meta[l];
const auto& next_lvl_meta = block_level_meta[l+1];
// Addresses of the first elements of level_lengths and level_parents
// that belong to this level
const auto lvl_lengths = level_lengths + lvl_meta.level_data_index;
const auto lvl_parents = level_parents + lvl_meta.level_data_index;
const unsigned width = lvl_meta.num_branches;
// Perform backward substitution for each branch on this level.
// One thread per branch.
if (tid < width) {
const unsigned len = lvl_lengths[tid];
unsigned pos = lvl_meta.matrix_data_index + tid;
// Zero diagonal term implies dt==0; just leave rhs (for whole matrix)
// alone in that case.
// Each cell has a different `dt`, because we choose time step size
// according to when the next event is arriving at a cell. So, some
// cells require more time steps than others, but we have to solve
// all the matrices at the same time. When a cell finishes, we put a
// `0` on the diagonal to mark that it should not be solved for.
if (d[pos]!=0) {
// each branch perform substitution
for (unsigned i=0; i<len-1; ++i) {
const unsigned next_pos = pos + width;
const auto d_next = d[next_pos];
const auto rhs_next = rhs[next_pos];
const T factor = -u[pos]/d[pos];
d[next_pos] = fma(factor, u[pos], d_next);
rhs[next_pos] = fma(factor, rhs[pos], rhs_next);
pos = next_pos;
}
// Update d and rhs at the parent node of this branch.
// A parent may have more than one branch contributing to it, so we use
// atomic updates to avoid race conditions.
const unsigned parent_index = next_lvl_meta.matrix_data_index;
const unsigned p = parent_index + lvl_parents[tid];
const T factor = -u[pos] / d[pos];
gpu_atomic_add(d + p, factor*u[pos]);
gpu_atomic_add(rhs + p, factor*rhs[pos]);
}
}
__syncthreads();
}
// Solve the root
{
// The levels are sorted such that the root is the last level
const auto& last_lvl_meta = block_level_meta[num_levels-1];
const auto lvl_lengths = level_lengths + last_lvl_meta.level_data_index;
const unsigned width = num_matrix[bid];
if (tid < width) {
const unsigned len = lvl_lengths[tid];
unsigned pos = last_lvl_meta.matrix_data_index + tid;
if (d[pos]!=0) {
// backward
for (unsigned i=0; i<len-1; ++i) {
const unsigned next_pos = pos + width;
const T factor = -u[pos] / d[pos];
const auto rhs_next = rhs[next_pos];
const auto d_next = d[next_pos];
d[next_pos] = fma(factor, u[pos], d_next);
rhs[next_pos] = fma(factor, rhs[pos], rhs_next);
pos = next_pos;
}
auto rhsp = rhs[pos] / d[pos];
rhs[pos] = rhsp;
pos -= width;
// forward
for (unsigned i=0; i<len-1; ++i) {
rhsp = rhs[pos] - u[pos]*rhsp;
rhsp /= d[pos];
rhs[pos] = rhsp;
pos -= width;
}
}
}
}
// forward substitution
// take great care with loop limits decrementing unsigned counter l
for (unsigned l=num_levels-1; l>0; --l) {
const auto& lvl_meta = block_level_meta[l-1];
// Addresses of the first elements of level_lengths and level_parents
// that belong to this level
const auto lvl_lengths = level_lengths + lvl_meta.level_data_index;
const auto lvl_parents = level_parents + lvl_meta.level_data_index;
const unsigned width = lvl_meta.num_branches;
const unsigned parent_index = block_level_meta[l].matrix_data_index;
__syncthreads();
// Perform forward-substitution for each branch on this level.
// One thread per branch.
if (tid < width) {
// Find the index of the first node in this branch.
const unsigned len = lvl_lengths[tid];
unsigned pos = lvl_meta.matrix_data_index + (len-1)*width + tid;
if (d[pos]!=0) {
// Load the rhs value for the parent node of this branch.
const unsigned p = parent_index + lvl_parents[tid];
T rhsp = rhs[p];
// each branch perform substitution
for (unsigned i=0; i<len; ++i) {
rhsp = rhs[pos] - u[pos]*rhsp;
rhsp /= d[pos];
rhs[pos] = rhsp;
pos -= width;
}
}
}
}
}
} // namespace kernels
ARB_ARBOR_API void assemble_matrix_fine(
arb_value_type* d,
arb_value_type* rhs,
const arb_value_type* invariant_d,
const arb_value_type* voltage,
const arb_value_type* current,
const arb_value_type* conductivity,
const arb_value_type* cv_capacitance,
const arb_value_type* area,
const arb_value_type dt,
const arb_index_type* perm,
unsigned n)
{
launch_1d(n, 128, kernels::assemble_matrix_fine<arb_value_type, arb_index_type>,
d, rhs, invariant_d, voltage, current, conductivity, cv_capacitance, area,
dt, perm, n);
}
// Example:
//
// block 0 block 1 block 2
// .~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~ ~ ~
//
// L0 \ / L5 \ /
// \/ \/
// L1 \ / \ / L3 \ / \ | / \ / L6 \ / . . .
// \ / \ / \ / \|/ \ / \ /
// L2 | | L4 | | | L7 |
// | | | | | |
//
// levels = [L0, L1, L2, L3, L4, L5, L6, L7, ... ]
// block_index = [0, 3, 5, 8, ...]
// num_levels = [3, 2, 3, ...]
// num_cells = [2, 3, ...]
// num_blocks = level_start.size() - 1 = num_levels.size() = num_cells.size()
ARB_ARBOR_API void solve_matrix_fine(
arb_value_type* rhs,
arb_value_type* d, // diagonal values
const arb_value_type* u, // upper diagonal (and lower diagonal as the matrix is SPD)
const level_metadata* level_meta, // information pertaining to each level
const arb_index_type* level_lengths, // lengths of branches of every level concatenated
const arb_index_type* level_parents, // parents of branches of every level concatenated
const arb_index_type* block_index, // start index into levels for each gpu block
arb_index_type* num_cells, // the number of cells packed into this single matrix
arb_index_type* padded_size, // length of rhs, d, u, including padding
unsigned num_blocks, // number of blocks
unsigned blocksize) // size of each block
{
launch(num_blocks, blocksize, kernels::solve_matrix_fine<arb_value_type>,
rhs, d, u, level_meta, level_lengths, level_parents, block_index,
num_cells);
}
} // namespace gpu
} // namespace arb
|
529d99141cb96e8c80ade6a1e811317afbfa3c40.cu
|
#include <arbor/fvm_types.hpp>
#include <arbor/gpu/gpu_api.hpp>
#include <arbor/gpu/gpu_common.hpp>
#include "matrix_common.hpp"
#include "matrix_fine.hpp"
namespace arb {
namespace gpu {
namespace kernels {
/// GPU implementation of Hines matrix assembly.
/// Fine layout.
/// For a given time step size dt:
/// - use the precomputed alpha and alpha_d values to construct the diagonal
/// and off diagonal of the symmetric Hines matrix.
/// - compute the RHS of the linear system to solve.
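/// In terms of the arrays passed to the kernel this amounts to (with p = perm[i]):
///   g_i    = 1e-3*cv_capacitance[i]/dt + 1e-3*area[i]*conductivity[i]
///   d[p]   = g_i + invariant_d[i]
///   rhs[p] = g_i*voltage[i] - 1e-3*area[i]*current[i]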
template <typename T, typename I>
__global__
void assemble_matrix_fine(
T* __restrict__ const d,
T* __restrict__ const rhs,
const T* __restrict__ const invariant_d,
const T* __restrict__ const voltage,
const T* __restrict__ const current,
const T* __restrict__ const conductivity,
const T* __restrict__ const cv_capacitance,
const T* __restrict__ const area,
const T dt,
const I* __restrict__ const perm,
unsigned n)
{
const unsigned tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < n) {
// The 1e-3 is a constant of proportionality required to ensure that the
// conductance (gi) values have units μS (micro-Siemens).
// See the model documentation in docs/model for more information.
const auto pid = perm[tid];
const auto area_factor = T(1e-3)*area[tid];
const auto gi = T(1e-3)*cv_capacitance[tid]/dt + area_factor*conductivity[tid];
d[pid] = gi + invariant_d[tid];
rhs[pid] = gi*voltage[tid] - area_factor*current[tid];
}
}
/// GPU implementation of Hines Matrix solver.
/// Fine-grained tree based solver.
/// Each block solves a set of matrices by iterating over the levels of the matrix
/// and performing a backward and forward substitution. On each level one thread
/// gets assigned to one branch on this level of a matrix and solves and
/// performs the substitution. Afterwards all threads continue on the next
/// level.
/// To avoid idle threads, one should try to ensure that on each level there is a
/// similar number of branches.
template <typename T>
__global__
void solve_matrix_fine(
T* __restrict__ const rhs,
T* __restrict__ const d,
const T* __restrict__ const u,
const level_metadata* __restrict__ const level_meta,
const arb_index_type* __restrict__ const level_lengths,
const arb_index_type* __restrict__ const level_parents,
const arb_index_type* __restrict__ const block_index,
const arb_index_type* __restrict__ const num_matrix) // number of packed matrices = number of cells
{
const auto tid = threadIdx.x;
const auto bid = blockIdx.x;
const auto first_level = block_index[bid];
const auto num_levels = block_index[bid + 1] - first_level;
const auto block_level_meta = &level_meta[first_level];
// backward substitution
for (unsigned l=0; l<num_levels-1; ++l) {
// Metadata for this level and the next level
const auto& lvl_meta = block_level_meta[l];
const auto& next_lvl_meta = block_level_meta[l+1];
// Addresses of the first elements of level_lengths and level_parents
// that belong to this level
const auto lvl_lengths = level_lengths + lvl_meta.level_data_index;
const auto lvl_parents = level_parents + lvl_meta.level_data_index;
const unsigned width = lvl_meta.num_branches;
// Perform backward substitution for each branch on this level.
// One thread per branch.
if (tid < width) {
const unsigned len = lvl_lengths[tid];
unsigned pos = lvl_meta.matrix_data_index + tid;
// Zero diagonal term implies dt==0; just leave rhs (for whole matrix)
// alone in that case.
// Each cell has a different `dt`, because we choose time step size
// according to when the next event is arriving at a cell. So, some
// cells require more time steps than others, but we have to solve
// all the matrices at the same time. When a cell finishes, we put a
// `0` on the diagonal to mark that it should not be solved for.
if (d[pos]!=0) {
// each branch perform substitution
for (unsigned i=0; i<len-1; ++i) {
const unsigned next_pos = pos + width;
const auto d_next = d[next_pos];
const auto rhs_next = rhs[next_pos];
const T factor = -u[pos]/d[pos];
d[next_pos] = fma(factor, u[pos], d_next);
rhs[next_pos] = fma(factor, rhs[pos], rhs_next);
pos = next_pos;
}
// Update d and rhs at the parent node of this branch.
// A parent may have more than one branch contributing to it, so we use
// atomic updates to avoid race conditions.
const unsigned parent_index = next_lvl_meta.matrix_data_index;
const unsigned p = parent_index + lvl_parents[tid];
const T factor = -u[pos] / d[pos];
gpu_atomic_add(d + p, factor*u[pos]);
gpu_atomic_add(rhs + p, factor*rhs[pos]);
}
}
__syncthreads();
}
// Solve the root
{
// The levels are sorted such that the root is the last level
const auto& last_lvl_meta = block_level_meta[num_levels-1];
const auto lvl_lengths = level_lengths + last_lvl_meta.level_data_index;
const unsigned width = num_matrix[bid];
if (tid < width) {
const unsigned len = lvl_lengths[tid];
unsigned pos = last_lvl_meta.matrix_data_index + tid;
if (d[pos]!=0) {
// backward
for (unsigned i=0; i<len-1; ++i) {
const unsigned next_pos = pos + width;
const T factor = -u[pos] / d[pos];
const auto rhs_next = rhs[next_pos];
const auto d_next = d[next_pos];
d[next_pos] = fma(factor, u[pos], d_next);
rhs[next_pos] = fma(factor, rhs[pos], rhs_next);
pos = next_pos;
}
auto rhsp = rhs[pos] / d[pos];
rhs[pos] = rhsp;
pos -= width;
// forward
for (unsigned i=0; i<len-1; ++i) {
rhsp = rhs[pos] - u[pos]*rhsp;
rhsp /= d[pos];
rhs[pos] = rhsp;
pos -= width;
}
}
}
}
// forward substitution
// take great care with loop limits decrementing unsigned counter l
for (unsigned l=num_levels-1; l>0; --l) {
const auto& lvl_meta = block_level_meta[l-1];
// Addresses of the first elements of level_lengths and level_parents
// that belong to this level
const auto lvl_lengths = level_lengths + lvl_meta.level_data_index;
const auto lvl_parents = level_parents + lvl_meta.level_data_index;
const unsigned width = lvl_meta.num_branches;
const unsigned parent_index = block_level_meta[l].matrix_data_index;
__syncthreads();
// Perform forward-substitution for each branch on this level.
// One thread per branch.
if (tid < width) {
// Find the index of the first node in this branch.
const unsigned len = lvl_lengths[tid];
unsigned pos = lvl_meta.matrix_data_index + (len-1)*width + tid;
if (d[pos]!=0) {
// Load the rhs value for the parent node of this branch.
const unsigned p = parent_index + lvl_parents[tid];
T rhsp = rhs[p];
// each branch perform substitution
for (unsigned i=0; i<len; ++i) {
rhsp = rhs[pos] - u[pos]*rhsp;
rhsp /= d[pos];
rhs[pos] = rhsp;
pos -= width;
}
}
}
}
}
} // namespace kernels
ARB_ARBOR_API void assemble_matrix_fine(
arb_value_type* d,
arb_value_type* rhs,
const arb_value_type* invariant_d,
const arb_value_type* voltage,
const arb_value_type* current,
const arb_value_type* conductivity,
const arb_value_type* cv_capacitance,
const arb_value_type* area,
const arb_value_type dt,
const arb_index_type* perm,
unsigned n)
{
launch_1d(n, 128, kernels::assemble_matrix_fine<arb_value_type, arb_index_type>,
d, rhs, invariant_d, voltage, current, conductivity, cv_capacitance, area,
dt, perm, n);
}
// Example:
//
// block 0 block 1 block 2
// .~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~ ~ ~
//
// L0 \ / L5 \ /
// \/ \/
// L1 \ / \ / L3 \ / \ | / \ / L6 \ / . . .
// \ / \ / \ / \|/ \ / \ /
// L2 | | L4 | | | L7 |
// | | | | | |
//
// levels = [L0, L1, L2, L3, L4, L5, L6, L7, ... ]
// block_index = [0, 3, 5, 8, ...]
// num_levels = [3, 2, 3, ...]
// num_cells = [2, 3, ...]
// num_blocks = level_start.size() - 1 = num_levels.size() = num_cells.size()
ARB_ARBOR_API void solve_matrix_fine(
arb_value_type* rhs,
arb_value_type* d, // diagonal values
const arb_value_type* u, // upper diagonal (and lower diagonal as the matrix is SPD)
const level_metadata* level_meta, // information pertaining to each level
const arb_index_type* level_lengths, // lengths of branches of every level concatenated
const arb_index_type* level_parents, // parents of branches of every level concatenated
const arb_index_type* block_index, // start index into levels for each gpu block
arb_index_type* num_cells, // the number of cells packed into this single matrix
arb_index_type* padded_size, // length of rhs, d, u, including padding
unsigned num_blocks, // number of blocks
unsigned blocksize) // size of each block
{
launch(num_blocks, blocksize, kernels::solve_matrix_fine<arb_value_type>,
rhs, d, u, level_meta, level_lengths, level_parents, block_index,
num_cells);
}
} // namespace gpu
} // namespace arb
|
4196c49f69ee7b0f693bd39b3cd7000652e223eb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 100000000
float hArray[N];
float *dArray;
int blocks;
clock_t begin1,begin2,begin3,begin4,end1,end2,end3,end4;
void prologue(void)
{
memset(hArray, 0, sizeof(hArray));
for(int i = 0; i < N; i++)
{
hArray[i] = i + 1;
}
hipMalloc((void**)&dArray, sizeof(hArray));
begin2 = clock();
hipMemcpy(dArray, hArray, sizeof(hArray), hipMemcpyHostToDevice);
end2 = clock();
}
void epilogue(void)
{
hipMemcpy(hArray, dArray, sizeof(hArray), hipMemcpyDeviceToHost);
hipFree(dArray);
}
// Kernel
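// pow3 lets the compiler fully unroll the 100-iteration loop (#pragma unroll),
// while pow4 forces it to stay rolled (#pragma unroll 1); timing both kernels
// compares unrolled against non-unrolled code.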
__global__ void pow3(float *A)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
if(x < N)
{
#pragma unroll
for(int i =0; i< 100; i++)
{
A[x] += A[x] * A[x] * A[x] + A[x] * A[x] + A[x];
}
}
}
__global__ void pow4(float *A)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
if(x < N)
{
#pragma unroll 1
for(int i =0; i< 100; i++)
{
A[x] += A[x] * A[x] * A[x] + A[x] * A[x] + A[x];
}
}
}
//CPU
void cpu(float *A)
{
int x;
for (x = 0; x<N; x++)
{
A[x] = A[x] * A[x] * A[x] * A[x] * A[x] * A[x];
}
}
int main(int argc, char** argv)
{
int devCnt;
hipGetDeviceCount(&devCnt);
if(devCnt == 0) {
perror("No CUDA devices available -- exiting.");
return 1;
}
struct hipDeviceProp_t *prop;
prop = (hipDeviceProp_t*)malloc(sizeof(struct hipDeviceProp_t));
hipGetDeviceProperties(prop,0);
printf("Ilosc watkow: %d\n", prop->maxThreadsPerBlock);
//GPU
prologue();
blocks = N / prop->maxThreadsPerBlock;
if(N % prop->maxThreadsPerBlock)
blocks++;
begin4 = clock();
hipLaunchKernelGGL(( pow3), dim3(blocks), dim3(prop->maxThreadsPerBlock), 0, 0, dArray);
hipDeviceSynchronize();
end4 = clock();
begin1 = clock();
hipLaunchKernelGGL(( pow4), dim3(blocks), dim3(prop->maxThreadsPerBlock), 0, 0, dArray);
hipDeviceSynchronize();
end1 = clock();
epilogue();
//CPU
begin3 = clock();
cpu(hArray);
end3 = clock();
double time_spent1 = (double)(end1 - begin1) / CLOCKS_PER_SEC;
double time_spent4 = (double)(end4 - begin4) / CLOCKS_PER_SEC;
printf("Nie odwiniete: %f\n", time_spent1);
printf("Odwiniete: %f\n", time_spent4);
return 0;
}
|
4196c49f69ee7b0f693bd39b3cd7000652e223eb.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 100000000
float hArray[N];
float *dArray;
int blocks;
clock_t begin1,begin2,begin3,begin4,end1,end2,end3,end4;
void prologue(void)
{
memset(hArray, 0, sizeof(hArray));
for(int i = 0; i < N; i++)
{
hArray[i] = i + 1;
}
cudaMalloc((void**)&dArray, sizeof(hArray));
begin2 = clock();
cudaMemcpy(dArray, hArray, sizeof(hArray), cudaMemcpyHostToDevice);
end2 = clock();
}
void epilogue(void)
{
cudaMemcpy(hArray, dArray, sizeof(hArray), cudaMemcpyDeviceToHost);
cudaFree(dArray);
}
// Kernel
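// pow3 lets the compiler fully unroll the 100-iteration loop (#pragma unroll),
// while pow4 forces it to stay rolled (#pragma unroll 1); timing both kernels
// compares unrolled against non-unrolled code.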
__global__ void pow3(float *A)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
if(x < N)
{
#pragma unroll
for(int i =0; i< 100; i++)
{
A[x] += A[x] * A[x] * A[x] + A[x] * A[x] + A[x];
}
}
}
__global__ void pow4(float *A)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
if(x < N)
{
#pragma unroll 1
for(int i =0; i< 100; i++)
{
A[x] += A[x] * A[x] * A[x] + A[x] * A[x] + A[x];
}
}
}
//CPU
void cpu(float *A)
{
int x;
for (x = 0; x<N; x++)
{
A[x] = A[x] * A[x] * A[x] * A[x] * A[x] * A[x];
}
}
int main(int argc, char** argv)
{
int devCnt;
cudaGetDeviceCount(&devCnt);
if(devCnt == 0) {
perror("No CUDA devices available -- exiting.");
return 1;
}
struct cudaDeviceProp *prop;
prop = (cudaDeviceProp*)malloc(sizeof(struct cudaDeviceProp));
cudaGetDeviceProperties(prop,0);
printf("Ilosc watkow: %d\n", prop->maxThreadsPerBlock);
//GPU
prologue();
blocks = N / prop->maxThreadsPerBlock;
if(N % prop->maxThreadsPerBlock)
blocks++;
begin4 = clock();
pow3<<<blocks, prop->maxThreadsPerBlock>>>(dArray);
cudaThreadSynchronize();
end4 = clock();
begin1 = clock();
pow4<<<blocks, prop->maxThreadsPerBlock>>>(dArray);
cudaThreadSynchronize();
end1 = clock();
epilogue();
//CPU
begin3 = clock();
cpu(hArray);
end3 = clock();
double time_spent1 = (double)(end1 - begin1) / CLOCKS_PER_SEC;
double time_spent4 = (double)(end4 - begin4) / CLOCKS_PER_SEC;
printf("Nie odwiniete: %f\n", time_spent1);
printf("Odwiniete: %f\n", time_spent4);
return 0;
}
|
5802cf2ac06f96d3f3f2248e4792fdb2fa4f4254.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* CuFunctor.cu
*
* Created on: Sep 28, 2014
* Author: reid
*/
#include "CuFunctor.h"
#include <iostream>
#include <float.h>
#include "UnaryOpIndexF_Gen.h"
#include "UnaryOpF_Gen.h"
#include "BinaryOpF_Gen.h"
#include "debug.h"
bool SetupMbrFuncs[dtLast][MAX_GPUS];
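// flags indexed by [data type][GPU]; clearSetupMbrFlags() resets them all to false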
void clearSetupMbrFlags() {
for(auto i =0; i < dtLast; i++) {
for (auto j = 0; j < MAX_GPUS; j++) {
SetupMbrFuncs[i][j] = false;
}
}
}
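// operator[] accessors expose the functor state components (state.x, state.y, and state.z for 3-element state) by index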
__host__ __device__ float& CuFunctor<float,2>::operator[](ptrdiff_t ofs) { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ const float& CuFunctor<float,2>::operator[](ptrdiff_t ofs) const { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ double& CuFunctor<double,2>::operator[](ptrdiff_t ofs) { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ const double& CuFunctor<double,2>::operator[](ptrdiff_t ofs) const { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ int& CuFunctor<int,2>::operator[](ptrdiff_t ofs) { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ const int& CuFunctor<int,2>::operator[](ptrdiff_t ofs) const { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ uint& CuFunctor<uint,2>::operator[](ptrdiff_t ofs) { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ const uint& CuFunctor<uint,2>::operator[](ptrdiff_t ofs) const { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ ulong& CuFunctor<ulong,2>::operator[](ptrdiff_t ofs) { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ const ulong& CuFunctor<ulong,2>::operator[](ptrdiff_t ofs) const { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ float& CuFunctor<float,3>::operator[](ptrdiff_t ofs) {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ const float& CuFunctor<float,3>::operator[](ptrdiff_t ofs) const {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ double& CuFunctor<double,3>::operator[](ptrdiff_t ofs) {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ const double& CuFunctor<double,3>::operator[](ptrdiff_t ofs) const {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ int& CuFunctor<int,3>::operator[](ptrdiff_t ofs) {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ const int& CuFunctor<int,3>::operator[](ptrdiff_t ofs) const {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ uint& CuFunctor<uint,3>::operator[](ptrdiff_t ofs) {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ const uint& CuFunctor<uint,3>::operator[](ptrdiff_t ofs) const {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ ulong& CuFunctor<ulong,3>::operator[](ptrdiff_t ofs) {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ const ulong& CuFunctor<ulong,3>::operator[](ptrdiff_t ofs) const {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
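// host/device numeric helpers: per-type comparison tolerance (epsilon) and min/max values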
template<> __host__ __device__ float epsilon<float>() {
return 1e-6;
}
template<> __host__ __device__ double epsilon<double>() {
return 1e-10;
}
template<typename T> __host__ __device__ T minValue() {
return 0; // will be overridden by specializations
}
template<> __host__ __device__ float minValue<float>() {
return FLT_MIN;
}
template<> __host__ __device__ double minValue<double>() {
return DBL_MIN;
}
template<> __host__ __device__ int minValue<int>() {
return INT_MIN;
}
template<> __host__ __device__ float maxValue<float>() {
return FLT_MAX;
}
template<> __host__ __device__ double maxValue<double>() {
return DBL_MAX;
}
template<> __host__ __device__ ulong maxValue<ulong>() {
return 0xffffffffffffffffUL;
}
template<> __host__ __device__ int maxValue<int>() {
return INT_MAX;
}
template<> __host__ __device__ uint maxValue<uint>() {
return 0xFFFFFFFFu;
}
/*
*
* test kernels
*
*
*/
#ifdef CuMatrix_Enable_KTS
template<typename T, template <typename> class IndexUnaryOp> __global__ void switchableIndexFunctorTest( IndexUnaryOp<T> ftr )
#else
template <typename T, int StateDim> __global__ void switchableIndexFunctorTest( UnaryOpIndexF<T,StateDim> ftr )
#endif
{
#ifdef CuMatrix_StatFunc
flprintf("ftr.fn %p\n",ftr.fn);
#else
#ifndef CuMatrix_Enable_KTS
flprintf("ftr.operation %p\n",ftr.operation);
#endif
#endif
/*
flprintf("(device-side) &UnaryOpIndexF<T,0>::operatorOneOver == %p\n", &UnaryOpIndexF<T,0>::operatorOneOver );
uof.operation = &UnaryOpIndexF<T,0>::operatorOneOver;
*/
// flprintf("switchableIndexFunctorTest idx %u, t %f\n", idx, ftr(idx));
// flprintf("switchableIndexFunctorTest idx %u, oif %f\n", idx, uof(idx));
}
#ifdef CuMatrix_Enable_KTS
template<typename T, template <typename> class IndexUnaryOp> __global__ void indexFunctorTest( IndexUnaryOp<T> ftr )
#else
template <typename T, int StateDim> __global__ void indexFunctorTest( UnaryOpIndexF<T,StateDim> ftr )
#endif
{
uint idx = blockDim.x * threadIdx.y + threadIdx.x;
#ifdef CuMatrix_StatFunc
flprintf("indexFunctorTest<float,%d> ftr.fn %p\n", StateDim, ftr.fn);
#else
#ifndef CuMatrix_Enable_KTS
flprintf("indexFunctorTest<float,%d> ftr.operation %p\n", StateDim, ftr.operation);
#endif
#endif
#ifdef CuMatrix_Enable_KTS
flprintf("indexFunctorTest<float> idx %u, t %f\n", idx, (float) ftr(idx));
#else
flprintf("indexFunctorTest<float,%d> idx %u, t %f\n", StateDim, idx, (float) ftr(idx));
#endif
}
#ifdef CuMatrix_Enable_KTS
template <typename T, template <typename> class UnaryOp> __global__ void unaryOpTest( UnaryOp<T> uopf ) {
#else
template <typename T, int StateDim> __global__ void unaryOpTest( UnaryOpF<T,StateDim> uopf ) {
#endif
T xi = static_cast<T>( -5 + 1.0 * threadIdx.x);
flprintf("unaryOpTest xi %f, uopf(xi) %f\n", (float) xi, (float)uopf(xi));
}
#ifdef CuMatrix_Enable_KTS
template <typename T, template <typename> class BinaryOp> __global__ void binaryOpTest( BinaryOp<T> bopf ) {
#else
template <typename T, int StateDim> __global__ void binaryOpTest( BinaryOpF<T,StateDim> bopf ) {
#endif
T xi1 = static_cast<T>( -1.5 + 1.0 * threadIdx.x);
T xi2 = static_cast<T>( -1.5 + 1.0 * threadIdx.y);
flprintf("binaryOpTest xi1 %f,xi2 %f, bopf(xi1,xi2) %f\n", (float) xi1, (float) xi2, (float)bopf(xi1,xi2));
}
void testGets() {
CuFunctor<float,1> d;
d.state = 5;
std::cout << "d[0] " << d[0] << "\n";
assert(d[0]==5);
CuFunctor<ulong,1> du;
du.state = 55u;
std::cout << "du[0] " << du[0] << "\n";
assert(du[0]==55u);
CuFunctor<float,2> d2;
float2 f2;
f2.x = 5; f2.y = 6;
d2.state = f2;
std::cout << "d2[1] " << d2[1] << "\n";
std::cout << "d2[0] " << d2[0] << "\n";
assert(d2[1]==6);
assert(d2[0]==5);
}
/* demonstrates using a switch to select templating by base functor (and using method
* pointer to call the subclass's operator) or templating by functor directly
*/
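// (with CuMatrix_Enable_KTS the test kernels are templated on the concrete functor type;
// without it they take the UnaryOpIndexF/UnaryOpF base by value and dispatch through the
// stored fn/operation pointer)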
template<typename T>void test0sFillers() {
oneOverFiller<T> oof = Functory<T,oneOverFiller>::pinch();
UnaryOpIndexF<T,0> uof(oof);
#ifndef CuMatrix_Enable_KTS
#ifdef CuMatrix_StatFunc
printf("uof.operatorOneOver(5) %f\n",(float)uof.operatorOneOver(uof,5));
#else
printf("uof.operatorOneOver(5) %f\n",(float)uof.operatorOneOver(5));
#endif
#endif
checkCudaErrors(hipDeviceSynchronize());
#ifdef CuMatrix_Enable_KTS
std::cout << "callin switchableIndexFunctorTest<<<1,3>>>(oof)\n";
flprintf("test0sFillers host oof(5) %f\n", oof(5));
hipLaunchKernelGGL(( switchableIndexFunctorTest), dim3(1),dim3(3), 0, 0, oof);
#else
flprintf("(host-side) &UnaryOpIndexF<T,0>::operatorOneOver == %p\n", &UnaryOpIndexF<T,0>::operatorOneOver );
flprintf("test0sFillers host oof(5) %f\n", oof(5));
std::cout << "callin switchableIndexFunctorTest with one over filler\n";
std::cout << "callin switchableIndexFunctorTest<T,0><<<1,3>>>(oof)\n";
hipLaunchKernelGGL(( switchableIndexFunctorTest<T,0>), dim3(1),dim3(3), 0, 0, oof);
#endif
checkCudaErrors(hipDeviceSynchronize());
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( switchableIndexFunctorTest), dim3(1),dim3(3), 0, 0, oof);
#else
hipLaunchKernelGGL(( switchableIndexFunctorTest<T,0>), dim3(1),dim3(3), 0, 0, oof);
#endif
checkCudaErrors(hipDeviceSynchronize());
std::cout << "callin indexFunctorTest with oof 1/ filler with value\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( indexFunctorTest), dim3(1),dim3(3), 0, 0, oof);
#else
hipLaunchKernelGGL(( indexFunctorTest<T,0>), dim3(1),dim3(3), 0, 0, oof);
#endif
checkCudaErrors(hipDeviceSynchronize());
}
template<typename T>void test0sUnaryOps() {
sigmoidUnaryOp<T> z = Functory<T,sigmoidUnaryOp>::pinch();
std::cout << "callin unaryOpTest with sigmoidUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,sigmoidUnaryOp>), dim3(1),dim3(10), 0, 0, z);
#else
hipLaunchKernelGGL(( unaryOpTest<T,0>), dim3(1),dim3(10), 0, 0, z);
#endif
checkCudaErrors(hipDeviceSynchronize());
sigmoidGradientUnaryOp<T> zg = Functory<T,sigmoidGradientUnaryOp>::pinch();
std::cout << "callin unaryOpTest with sigmoidGradientUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,sigmoidGradientUnaryOp>), dim3(1),dim3(10), 0, 0, zg);
#else
hipLaunchKernelGGL(( unaryOpTest<T,0>), dim3(1),dim3(10), 0, 0, zg);
#endif
checkCudaErrors(hipDeviceSynchronize());
negateUnaryOp<T> neg = Functory<T,negateUnaryOp>::pinch();
std::cout << "callin unaryOpTest with negateUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,negateUnaryOp>), dim3(1),dim3(10), 0, 0, neg);
#else
hipLaunchKernelGGL(( unaryOpTest<T,0>), dim3(1),dim3(10), 0, 0, neg);
#endif
checkCudaErrors(hipDeviceSynchronize());
logUnaryOp<T> lg = Functory<T,logUnaryOp>::pinch();
std::cout << "callin unaryOpTest with logUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,logUnaryOp>), dim3(1),dim3(10), 0, 0, lg);
#else
hipLaunchKernelGGL(( unaryOpTest<T,0>), dim3(1),dim3(10), 0, 0, lg);
#endif
checkCudaErrors(hipDeviceSynchronize());
oneOverUnaryOp<T> oog = Functory<T,oneOverUnaryOp>::pinch();
std::cout << "callin unaryOpTest with oneOverUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,oneOverUnaryOp>), dim3(1),dim3(10), 0, 0, oog);
#else
hipLaunchKernelGGL(( unaryOpTest<T,0>), dim3(1),dim3(10), 0, 0, oog);
#endif
checkCudaErrors(hipDeviceSynchronize());
sqrtUnaryOp<T> sqrtf = Functory<T,sqrtUnaryOp>::pinch();
std::cout << "callin unaryOpTest with sqrtUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,sqrtUnaryOp>), dim3(1),dim3(10), 0, 0, sqrtf);
#else
hipLaunchKernelGGL(( unaryOpTest<T,0>), dim3(1),dim3(10), 0, 0, sqrtf);
#endif
checkCudaErrors(hipDeviceSynchronize());
sqrUnaryOp<T> sqrf = Functory<T,sqrUnaryOp>::pinch();
std::cout << "callin unaryOpTest with sqrUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,sqrUnaryOp>), dim3(1),dim3(10), 0, 0, sqrf);
#else
hipLaunchKernelGGL(( unaryOpTest<T,0>), dim3(1),dim3(10), 0, 0, sqrf);
#endif
checkCudaErrors(hipDeviceSynchronize());
slowInvSqrtUnaryOp<T> sisf = Functory<T,slowInvSqrtUnaryOp>::pinch();
std::cout << "callin unaryOpTest with slowInvSqrtUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,slowInvSqrtUnaryOp>), dim3(1),dim3(10), 0, 0, sisf);
#else
hipLaunchKernelGGL(( unaryOpTest<T,0>), dim3(1),dim3(10), 0, 0, sisf);
#endif
checkCudaErrors(hipDeviceSynchronize());
approxInvSqrtUnaryOp<T> aisf = Functory<T,approxInvSqrtUnaryOp>::pinch();
std::cout << "callin unaryOpTest with approxInvSqrtUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,approxInvSqrtUnaryOp>), dim3(1),dim3(10), 0, 0, aisf);
#else
hipLaunchKernelGGL(( unaryOpTest<T,0>), dim3(1),dim3(10), 0, 0, aisf);
#endif
checkCudaErrors(hipDeviceSynchronize());
oneOrZeroUnaryOp<T> oozbuof = Functory<T,oneOrZeroUnaryOp>::pinch();
std::cout << "callin unaryOpTest with oneOrZeroUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,oneOrZeroUnaryOp>), dim3(1),dim3(10), 0, 0, oozbuof);
#else
hipLaunchKernelGGL(( unaryOpTest<T,0>), dim3(1),dim3(10), 0, 0, oozbuof);
#endif
checkCudaErrors(hipDeviceSynchronize());
}
template<typename T>void test1sFillers() {
constFiller<T> cf = Functory<T,constFiller>::pinch(6.5);
UnaryOpIndexF<T,1> uof(cf);
#ifdef CuMatrix_StatFunc
printf("uof.operatorConst(5) %f\n",(float)uof.operatorConst(uof,5));
#else
#ifndef CuMatrix_Enable_KTS
printf("uof.operatorConst(5) %f\n",(float)uof.operatorConst(5));
#endif
#endif
std::cout << "callin indexFunctorTest with uof const filler with value " << uof[0] << "\n";
#ifndef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( indexFunctorTest<T,1>), dim3(1),dim3(3), 0, 0, uof);
checkCudaErrors(hipDeviceSynchronize());
std::cout << "callin indexFunctorTest with cf const filler with value " << cf[0] << "\n";
hipLaunchKernelGGL(( indexFunctorTest<T,1>), dim3(1),dim3(3), 0, 0, uof);
checkCudaErrors(hipDeviceSynchronize());
powFiller<T> powFlr = Functory<T,powFiller>::pinch(1.1);
std::cout << "callin indexFunctorTest with powFiller filler with value " << powFlr[0] << "\n";
hipLaunchKernelGGL(( indexFunctorTest<T,1>), dim3(1),dim3(3), 0, 0, powFlr);
checkCudaErrors(hipDeviceSynchronize());
#else
hipLaunchKernelGGL(( indexFunctorTest<T,constFiller>), dim3(1),dim3(3), 0, 0, cf);
checkCudaErrors(hipDeviceSynchronize());
std::cout << "callin indexFunctorTest with cf const filler with value " << cf[0] << "\n";
//indexFunctorTest<T,UnaryOpIndexF><<<1,3>>>(uof);
checkCudaErrors(hipDeviceSynchronize());
sequenceFiller<T> seqf = Functory<T,sequenceFiller>::pinch(21);
std::cout << "callin indexFunctorTest with sequenceFiller filler with value " << seqf[0] << "\n";
hipLaunchKernelGGL(( indexFunctorTest<T,sequenceFiller>), dim3(1),dim3(3), 0, 0, seqf);
checkCudaErrors(hipDeviceSynchronize());
powFiller<T> powFlr = Functory<T,powFiller>::pinch(1.1);
std::cout << "callin indexFunctorTest with powFiller filler with value " << powFlr[0] << "\n";
hipLaunchKernelGGL(( indexFunctorTest<T,powFiller>), dim3(1),dim3(3), 0, 0, powFlr);
checkCudaErrors(hipDeviceSynchronize());
#endif
}
template<typename T>void test1sUnaryOps() {
powUnaryOp<T> puo = Functory<T,powUnaryOp>::pinch(5);
std::cout << "callin unaryOpTest<T,1> with powUnaryOp " << puo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,powUnaryOp>), dim3(1),dim3(10), 0, 0, puo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,1>), dim3(1),dim3(10), 0, 0, puo);
#endif
checkCudaErrors(hipDeviceSynchronize());
scaleUnaryOp<T> suo = Functory<T,scaleUnaryOp>::pinch(50);
std::cout << "callin unaryOpTest<T,1> with scaleUnaryOp " << suo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,scaleUnaryOp>), dim3(1),dim3(10), 0, 0, suo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,1>), dim3(1),dim3(10), 0, 0, suo);
#endif
checkCudaErrors(hipDeviceSynchronize());
translationUnaryOp<T> tuo = Functory<T,translationUnaryOp>::pinch(-37.5);
std::cout << "callin unaryOpTest<T,1> with translationUnaryOp " << tuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,translationUnaryOp>), dim3(1),dim3(10), 0, 0, tuo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,1>), dim3(1),dim3(10), 0, 0, tuo);
#endif
checkCudaErrors(hipDeviceSynchronize());
subFromUnaryOp<T> sfuo = Functory<T,subFromUnaryOp>::pinch(101);
std::cout << "callin unaryOpTest<T,1> with subFromUnaryOp " << sfuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,subFromUnaryOp>), dim3(1),dim3(10), 0, 0, sfuo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,1>), dim3(1),dim3(10), 0, 0, sfuo);
#endif
checkCudaErrors(hipDeviceSynchronize());
ltUnaryOp<T> ltuo = Functory<T,ltUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with ltUnaryOp " << ltuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,ltUnaryOp>), dim3(1),dim3(10), 0, 0, ltuo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,1>), dim3(1),dim3(10), 0, 0, ltuo);
#endif
checkCudaErrors(hipDeviceSynchronize());
lteUnaryOp<T> lteuo = Functory<T,lteUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with lteUnaryOp " << lteuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,lteUnaryOp>), dim3(1),dim3(10), 0, 0, lteuo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,1>), dim3(1),dim3(10), 0, 0, lteuo);
#endif
checkCudaErrors(hipDeviceSynchronize());
gtUnaryOp<T> gtuo = Functory<T,gtUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with gtUnaryOp " << gtuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,gtUnaryOp>), dim3(1),dim3(10), 0, 0, gtuo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,1>), dim3(1),dim3(10), 0, 0, gtuo);
#endif
checkCudaErrors(hipDeviceSynchronize());
gteUnaryOp<T> gteuo = Functory<T,gteUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with gteUnaryOp " << gteuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,gteUnaryOp>), dim3(1),dim3(10), 0, 0, gteuo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,1>), dim3(1),dim3(10), 0, 0, gteuo);
#endif
checkCudaErrors(hipDeviceSynchronize());
eqUnaryOp<T> equo = Functory<T,eqUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with eqUnaryOp " << equo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,eqUnaryOp>), dim3(1),dim3(10), 0, 0, equo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,1>), dim3(1),dim3(10), 0, 0, equo);
#endif
checkCudaErrors(hipDeviceSynchronize());
neqUnaryOp<T> nequo = Functory<T,neqUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with neqUnaryOp " << nequo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,neqUnaryOp>), dim3(1),dim3(10), 0, 0, nequo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,1>), dim3(1),dim3(10), 0, 0, nequo);
#endif
checkCudaErrors(hipDeviceSynchronize());
}
template<typename T>void test1sBinaryOps() {
multBinaryOp<T> mbo = Functory<T,multBinaryOp>::pinch();
std::cout << "callin binaryOpTest<T,1> with multBinaryOp (identity == " << mbo[0] << ")\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( binaryOpTest<T,multBinaryOp>), dim3(dim3(1)),dim3(dim3(3,3)), 0, 0, mbo);
#else
hipLaunchKernelGGL(( binaryOpTest<T,1>), dim3(dim3(1)),dim3(dim3(3,3)), 0, 0, mbo);
#endif
checkCudaErrors(hipDeviceSynchronize());
plusBinaryOp<T> pbo = Functory<T,plusBinaryOp>::pinch();
std::cout << "callin binaryOpTest<T,1> with plusBinaryOp (identity == " << pbo.identity_ro() << ")\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( binaryOpTest<T,plusBinaryOp>), dim3(dim3(1)),dim3(dim3(3,3)), 0, 0, pbo);
#else
hipLaunchKernelGGL(( binaryOpTest<T,1>), dim3(dim3(1)),dim3(dim3(3,3)), 0, 0, pbo);
#endif
checkCudaErrors(hipDeviceSynchronize());
}
template<typename T>void test2sFillers() {
sequenceFiller<T> seqf = Functory<T,sequenceFiller>::pinch(10,2);
std::cout << "callin indexFunctorTest with sequenceFiller filler with start " << seqf[0] << " and step " << seqf[1] << "\n";
#ifndef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( indexFunctorTest<T,2>), dim3(1),dim3(10), 0, 0, seqf); // 5 rows
#else
hipLaunchKernelGGL(( indexFunctorTest<T,sequenceFiller>), dim3(1),dim3(10), 0, 0, seqf); // 5 rows
#endif
checkCudaErrors(hipDeviceSynchronize());
increasingColumnsFiller<T> icf = Functory<T,increasingColumnsFiller>::pinch(10,2);
std::cout << "callin indexFunctorTest<T,2> with increasingColumnsFiller with start " << icf[0] << " and width " << icf[1] << " cols \n";
#ifndef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( indexFunctorTest<T,2>), dim3(1),dim3(10), 0, 0, icf); // 5 rows
#else
hipLaunchKernelGGL(( indexFunctorTest<T,increasingColumnsFiller>), dim3(1),dim3(10), 0, 0, icf); // 5 rows
#endif
checkCudaErrors(hipDeviceSynchronize());
increasingRowsFiller<T> irf = Functory<T,increasingRowsFiller>::pinch(5, 5);
std::cout << "callin indexFunctorTest<T,2> with increasingRowsFiller with start " << irf[0] << " and height " << irf[1] << " rows \n";
#ifndef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( indexFunctorTest<T,2>), dim3(1),dim3(10), 0, 0, irf);
#else
hipLaunchKernelGGL(( indexFunctorTest<T,increasingRowsFiller>), dim3(1),dim3(10), 0, 0, irf);
#endif
checkCudaErrors(hipDeviceSynchronize());
seqModFiller<T> smf = Functory<T,seqModFiller>::pinch(5, 5);
std::cout << "callin indexFunctorTest<T,2> with seqModFiller with phase " << smf[0] << " and mod " << smf[1] << "\n";
#ifndef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( indexFunctorTest<T,2>), dim3(1),dim3(10), 0, 0, smf);
#else
hipLaunchKernelGGL(( indexFunctorTest<T,seqModFiller>), dim3(1),dim3(10), 0, 0, smf);
#endif
checkCudaErrors(hipDeviceSynchronize());
diagonalFiller<T> dgf = Functory<T,diagonalFiller>::pinch(5, 10);
std::cout << "callin indexFunctorTest<T,2> with diagonalFiller with value " << dgf.value_ro() << " and dim " << dgf[1] << "\n";
#ifndef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( indexFunctorTest<T,2>), dim3(1),dim3(dim3(10,10,1)), 0, 0, dgf);
#else
hipLaunchKernelGGL(( indexFunctorTest<T,diagonalFiller>), dim3(1),dim3(dim3(10,10,1)), 0, 0, dgf);
#endif
checkCudaErrors(hipDeviceSynchronize());
}
template<typename T>void test2sUnaryOps() {
almostEqUnaryOp<T> aeuo = Functory<T,almostEqUnaryOp>::pinch(static_cast<T>(0),static_cast<T>(2));
std::cout << "callin unaryOpTest<T,2> with almostEqUnaryOp " << aeuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,almostEqUnaryOp>), dim3(1),dim3(10), 0, 0, aeuo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,2>), dim3(1),dim3(10), 0, 0, aeuo);
#endif
checkCudaErrors(hipDeviceSynchronize());
notAlmostEqUnaryOp<T> naeuo = Functory<T,notAlmostEqUnaryOp>::pinch(static_cast<T>(0),2);
std::cout << "callin unaryOpTest<T,2> with notAlmostEqUnaryOp " << naeuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,notAlmostEqUnaryOp>), dim3(1),dim3(10), 0, 0, naeuo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,2>), dim3(1),dim3(10), 0, 0, naeuo);
#endif
checkCudaErrors(hipDeviceSynchronize());
}
template<typename T>void test3sUnaryOps() {
idx1DblockAlmostEqUnaryOp<T> ibauo = Functory<T,idx1DblockAlmostEqUnaryOp>::pinch(static_cast<T>(0),static_cast<T>(2),static_cast<T>(4));
std::cout << "callin idx1DblockAlmostEqUnaryOp<T,3> with idx1DblockAlmostEqUnaryOp " << ibauo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( unaryOpTest<T,idx1DblockAlmostEqUnaryOp>), dim3(1),dim3(10), 0, 0, ibauo);
#else
hipLaunchKernelGGL(( unaryOpTest<T,3>), dim3(1),dim3(10), 0, 0, ibauo);
#endif
checkCudaErrors(hipDeviceSynchronize());
}
template<typename T>void test3sFillers() {
sinFiller<T> sf = Functory<T,sinFiller>::pinch(20,3,10);
cosFiller<T> cf = Functory<T,cosFiller>::pinch(20,3,10);
/*
or more readably
sf.phase() = 10;
sf.amplitude() = 20;
sf.period() =3;
*/
std::cout << "sf.ampl " << sf[0] << "\n";
std::cout << "callin indexFunctorTest<T,3> with sin filler\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( indexFunctorTest<T,sinFiller>), dim3(1),dim3(3), 0, 0, sf);
checkCudaErrors(hipDeviceSynchronize());
std::cout << "cf.ampl " << cf.amplitude_ro() << "\n";
#else
hipLaunchKernelGGL(( indexFunctorTest<T,3>), dim3(1),dim3(3), 0, 0, sf);
checkCudaErrors(hipDeviceSynchronize());
std::cout << "cf.ampl " << cf.amplitude_ro() << "\n";
#endif
std::cout << "cf[1] " << cf[1] << "\n";
std::cout << "callin indexFunctorTest<T,3> with cos filler\n";
#ifdef CuMatrix_Enable_KTS
hipLaunchKernelGGL(( indexFunctorTest<T,cosFiller>), dim3(1),dim3(3), 0, 0, cf);
#else
hipLaunchKernelGGL(( indexFunctorTest<T,3>), dim3(1),dim3(3), 0, 0, cf);
#endif
checkCudaErrors(hipDeviceSynchronize());
}
int cuFunctorMain() {
int device;
checkCudaErrors(hipGetDevice(&device));
#ifndef CuMatrix_Enable_KTS
#ifdef CuMatrix_StatFunc
unaryOpIndexMbrs<float>::setupAllFunctionTables(device);
#else
unaryOpIndexMbrs<float>::setupAllMethodTables(device);
#endif
#endif
testGets();
test0sFillers<float>();
test1sFillers<float>();
test2sFillers<float>();
test2sUnaryOps<float>();
test3sFillers<float>();
test0sUnaryOps<float>();
test1sUnaryOps<float>();
test1sBinaryOps<float>();
return 0;
}
|
5802cf2ac06f96d3f3f2248e4792fdb2fa4f4254.cu
|
/*
* CuFunctor.cu
*
* Created on: Sep 28, 2014
* Author: reid
*/
#include "CuFunctor.h"
#include <iostream>
#include <float.h>
#include "UnaryOpIndexF_Gen.h"
#include "UnaryOpF_Gen.h"
#include "BinaryOpF_Gen.h"
#include "debug.h"
bool SetupMbrFuncs[dtLast][MAX_GPUS];
void clearSetupMbrFlags() {
for(auto i =0; i < dtLast; i++) {
for (auto j = 0; j < MAX_GPUS; j++) {
SetupMbrFuncs[i][j] = false;
}
}
}
__host__ __device__ float& CuFunctor<float,2>::operator[](ptrdiff_t ofs) { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ const float& CuFunctor<float,2>::operator[](ptrdiff_t ofs) const { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ double& CuFunctor<double,2>::operator[](ptrdiff_t ofs) { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ const double& CuFunctor<double,2>::operator[](ptrdiff_t ofs) const { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ int& CuFunctor<int,2>::operator[](ptrdiff_t ofs) { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ const int& CuFunctor<int,2>::operator[](ptrdiff_t ofs) const { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ uint& CuFunctor<uint,2>::operator[](ptrdiff_t ofs) { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ const uint& CuFunctor<uint,2>::operator[](ptrdiff_t ofs) const { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ ulong& CuFunctor<ulong,2>::operator[](ptrdiff_t ofs) { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ const ulong& CuFunctor<ulong,2>::operator[](ptrdiff_t ofs) const { assert(ofs >= 0 && ofs < 2); return ofs ? state.y : state.x; }
__host__ __device__ float& CuFunctor<float,3>::operator[](ptrdiff_t ofs) {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ const float& CuFunctor<float,3>::operator[](ptrdiff_t ofs) const {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ double& CuFunctor<double,3>::operator[](ptrdiff_t ofs) {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ const double& CuFunctor<double,3>::operator[](ptrdiff_t ofs) const {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ int& CuFunctor<int,3>::operator[](ptrdiff_t ofs) {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ const int& CuFunctor<int,3>::operator[](ptrdiff_t ofs) const {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ uint& CuFunctor<uint,3>::operator[](ptrdiff_t ofs) {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ const uint& CuFunctor<uint,3>::operator[](ptrdiff_t ofs) const {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ ulong& CuFunctor<ulong,3>::operator[](ptrdiff_t ofs) {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
__host__ __device__ const ulong& CuFunctor<ulong,3>::operator[](ptrdiff_t ofs) const {
assert(ofs >= 0 && ofs < 3);
switch (ofs) {
case 0:
return state.x;
case 1:
return state.y;
case 2:
return state.z;
default:
assert(false);
return state.x;
}
}
template<> __host__ __device__ float epsilon<float>() {
return 1e-6;
}
template<> __host__ __device__ double epsilon<double>() {
return 1e-10;
}
template<typename T> __host__ __device__ T minValue() {
return 0; // will be overridden by specializations
}
template<> __host__ __device__ float minValue<float>() {
return FLT_MIN;
}
template<> __host__ __device__ double minValue<double>() {
return DBL_MIN;
}
template<> __host__ __device__ int minValue<int>() {
return INT_MIN;
}
template<> __host__ __device__ float maxValue<float>() {
return FLT_MAX;
}
template<> __host__ __device__ double maxValue<double>() {
return DBL_MAX;
}
template<> __host__ __device__ ulong maxValue<ulong>() {
return 0xffffffffffffffffUL;
}
template<> __host__ __device__ int maxValue<int>() {
return INT_MAX;
}
template<> __host__ __device__ uint maxValue<uint>() {
return 0xFFFFFFFFu;
}
/*
*
* test kernels
*
*
*/
#ifdef CuMatrix_Enable_KTS
template<typename T, template <typename> class IndexUnaryOp> __global__ void switchableIndexFunctorTest( IndexUnaryOp<T> ftr )
#else
template <typename T, int StateDim> __global__ void switchableIndexFunctorTest( UnaryOpIndexF<T,StateDim> ftr )
#endif
{
#ifdef CuMatrix_StatFunc
flprintf("ftr.fn %p\n",ftr.fn);
#else
#ifndef CuMatrix_Enable_KTS
flprintf("ftr.operation %p\n",ftr.operation);
#endif
#endif
/*
flprintf("(device-side) &UnaryOpIndexF<T,0>::operatorOneOver == %p\n", &UnaryOpIndexF<T,0>::operatorOneOver );
uof.operation = &UnaryOpIndexF<T,0>::operatorOneOver;
*/
// flprintf("switchableIndexFunctorTest idx %u, t %f\n", idx, ftr(idx));
// flprintf("switchableIndexFunctorTest idx %u, oif %f\n", idx, uof(idx));
}
#ifdef CuMatrix_Enable_KTS
template<typename T, template <typename> class IndexUnaryOp> __global__ void indexFunctorTest( IndexUnaryOp<T> ftr )
#else
template <typename T, int StateDim> __global__ void indexFunctorTest( UnaryOpIndexF<T,StateDim> ftr )
#endif
{
uint idx = blockDim.x * threadIdx.y + threadIdx.x;
#ifdef CuMatrix_StatFunc
flprintf("indexFunctorTest<float,%d> ftr.fn %p\n", StateDim, ftr.fn);
#else
#ifndef CuMatrix_Enable_KTS
flprintf("indexFunctorTest<float,%d> ftr.operation %p\n", StateDim, ftr.operation);
#endif
#endif
#ifdef CuMatrix_Enable_KTS
flprintf("indexFunctorTest<float> idx %u, t %f\n", idx, (float) ftr(idx));
#else
flprintf("indexFunctorTest<float,%d> idx %u, t %f\n", StateDim, idx, (float) ftr(idx));
#endif
}
#ifdef CuMatrix_Enable_KTS
template <typename T, template <typename> class UnaryOp> __global__ void unaryOpTest( UnaryOp<T> uopf ) {
#else
template <typename T, int StateDim> __global__ void unaryOpTest( UnaryOpF<T,StateDim> uopf ) {
#endif
T xi = static_cast<T>( -5 + 1.0 * threadIdx.x);
flprintf("unaryOpTest xi %f, uopf(xi) %f\n", (float) xi, (float)uopf(xi));
}
#ifdef CuMatrix_Enable_KTS
template <typename T, template <typename> class BinaryOp> __global__ void binaryOpTest( BinaryOp<T> bopf ) {
#else
template <typename T, int StateDim> __global__ void binaryOpTest( BinaryOpF<T,StateDim> bopf ) {
#endif
T xi1 = static_cast<T>( -1.5 + 1.0 * threadIdx.x);
T xi2 = static_cast<T>( -1.5 + 1.0 * threadIdx.y);
flprintf("binaryOpTest xi1 %f,xi2 %f, bopf(xi1,xi2) %f\n", (float) xi1, (float) xi2, (float)bopf(xi1,xi2));
}
void testGets() {
CuFunctor<float,1> d;
d.state = 5;
std::cout << "d[0] " << d[0] << "\n";
assert(d[0]==5);
CuFunctor<ulong,1> du;
du.state = 55u;
std::cout << "du[0] " << du[0] << "\n";
assert(du[0]==55u);
CuFunctor<float,2> d2;
float2 f2;
f2.x = 5; f2.y = 6;
d2.state = f2;
std::cout << "d2[1] " << d2[1] << "\n";
std::cout << "d2[0] " << d2[0] << "\n";
assert(d2[1]==6);
assert(d2[0]==5);
}
/* demonstrates using a switch to select templating by base functor (and using method
* pointer to call the subclass's operator) or templating by functor directly
*/
template<typename T>void test0sFillers() {
oneOverFiller<T> oof = Functory<T,oneOverFiller>::pinch();
UnaryOpIndexF<T,0> uof(oof);
#ifndef CuMatrix_Enable_KTS
#ifdef CuMatrix_StatFunc
printf("uof.operatorOneOver(5) %f\n",(float)uof.operatorOneOver(uof,5));
#else
printf("uof.operatorOneOver(5) %f\n",(float)uof.operatorOneOver(5));
#endif
#endif
checkCudaErrors(cudaDeviceSynchronize());
#ifdef CuMatrix_Enable_KTS
std::cout << "callin switchableIndexFunctorTest<<<1,3>>>(oof)\n";
flprintf("test0sFillers host oof(5) %f\n", oof(5));
switchableIndexFunctorTest<<<1,3>>>(oof);
#else
flprintf("(host-side) &UnaryOpIndexF<T,0>::operatorOneOver == %p\n", &UnaryOpIndexF<T,0>::operatorOneOver );
flprintf("test0sFillers host oof(5) %f\n", oof(5));
std::cout << "callin switchableIndexFunctorTest with one over filler\n";
std::cout << "callin switchableIndexFunctorTest<T,0><<<1,3>>>(oof)\n";
switchableIndexFunctorTest<T,0><<<1,3>>>(oof);
#endif
checkCudaErrors(cudaDeviceSynchronize());
#ifdef CuMatrix_Enable_KTS
switchableIndexFunctorTest<<<1,3>>>(oof);
#else
switchableIndexFunctorTest<T,0><<<1,3>>>(oof);
#endif
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "callin indexFunctorTest with oof 1/ filler with value\n";
#ifdef CuMatrix_Enable_KTS
indexFunctorTest<<<1,3>>>(oof);
#else
indexFunctorTest<T,0><<<1,3>>>(oof);
#endif
checkCudaErrors(cudaDeviceSynchronize());
}
template<typename T>void test0sUnaryOps() {
sigmoidUnaryOp<T> z = Functory<T,sigmoidUnaryOp>::pinch();
std::cout << "callin unaryOpTest with sigmoidUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,sigmoidUnaryOp><<<1,10>>>(z);
#else
unaryOpTest<T,0><<<1,10>>>(z);
#endif
checkCudaErrors(cudaDeviceSynchronize());
sigmoidGradientUnaryOp<T> zg = Functory<T,sigmoidGradientUnaryOp>::pinch();
std::cout << "callin unaryOpTest with sigmoidGradientUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,sigmoidGradientUnaryOp><<<1,10>>>(zg);
#else
unaryOpTest<T,0><<<1,10>>>(zg);
#endif
checkCudaErrors(cudaDeviceSynchronize());
negateUnaryOp<T> neg = Functory<T,negateUnaryOp>::pinch();
std::cout << "callin unaryOpTest with negateUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,negateUnaryOp><<<1,10>>>(neg);
#else
unaryOpTest<T,0><<<1,10>>>(neg);
#endif
checkCudaErrors(cudaDeviceSynchronize());
logUnaryOp<T> lg = Functory<T,logUnaryOp>::pinch();
std::cout << "callin unaryOpTest with logUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,logUnaryOp><<<1,10>>>(lg);
#else
unaryOpTest<T,0><<<1,10>>>(lg);
#endif
checkCudaErrors(cudaDeviceSynchronize());
oneOverUnaryOp<T> oog = Functory<T,oneOverUnaryOp>::pinch();
std::cout << "callin unaryOpTest with oneOverUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,oneOverUnaryOp><<<1,10>>>(oog);
#else
unaryOpTest<T,0><<<1,10>>>(oog);
#endif
checkCudaErrors(cudaDeviceSynchronize());
sqrtUnaryOp<T> sqrtf = Functory<T,sqrtUnaryOp>::pinch();
std::cout << "callin unaryOpTest with sqrtUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,sqrtUnaryOp><<<1,10>>>(sqrtf);
#else
unaryOpTest<T,0><<<1,10>>>(sqrtf);
#endif
checkCudaErrors(cudaDeviceSynchronize());
sqrUnaryOp<T> sqrf = Functory<T,sqrUnaryOp>::pinch();
std::cout << "callin unaryOpTest with sqrUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,sqrUnaryOp><<<1,10>>>(sqrf);
#else
unaryOpTest<T,0><<<1,10>>>(sqrf);
#endif
checkCudaErrors(cudaDeviceSynchronize());
slowInvSqrtUnaryOp<T> sisf = Functory<T,slowInvSqrtUnaryOp>::pinch();
std::cout << "callin unaryOpTest with slowInvSqrtUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,slowInvSqrtUnaryOp><<<1,10>>>(sisf);
#else
unaryOpTest<T,0><<<1,10>>>(sisf);
#endif
checkCudaErrors(cudaDeviceSynchronize());
approxInvSqrtUnaryOp<T> aisf = Functory<T,approxInvSqrtUnaryOp>::pinch();
std::cout << "callin unaryOpTest with approxInvSqrtUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,approxInvSqrtUnaryOp><<<1,10>>>(aisf);
#else
unaryOpTest<T,0><<<1,10>>>(aisf);
#endif
checkCudaErrors(cudaDeviceSynchronize());
oneOrZeroUnaryOp<T> oozbuof = Functory<T,oneOrZeroUnaryOp>::pinch();
std::cout << "callin unaryOpTest with oneOrZeroUnaryOp\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,oneOrZeroUnaryOp><<<1,10>>>(oozbuof);
#else
unaryOpTest<T,0><<<1,10>>>(oozbuof);
#endif
checkCudaErrors(cudaDeviceSynchronize());
}
template<typename T>void test1sFillers() {
constFiller<T> cf = Functory<T,constFiller>::pinch(6.5);
UnaryOpIndexF<T,1> uof(cf);
#ifdef CuMatrix_StatFunc
printf("uof.operatorConst(5) %f\n",(float)uof.operatorConst(uof,5));
#else
#ifndef CuMatrix_Enable_KTS
printf("uof.operatorConst(5) %f\n",(float)uof.operatorConst(5));
#endif
#endif
std::cout << "callin indexFunctorTest with uof const filler with value " << uof[0] << "\n";
#ifndef CuMatrix_Enable_KTS
indexFunctorTest<T,1><<<1,3>>>(uof);
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "callin indexFunctorTest with cf const filler with value " << cf[0] << "\n";
indexFunctorTest<T,1><<<1,3>>>(uof);
checkCudaErrors(cudaDeviceSynchronize());
powFiller<T> powFlr = Functory<T,powFiller>::pinch(1.1);
std::cout << "callin indexFunctorTest with powFiller filler with value " << powFlr[0] << "\n";
indexFunctorTest<T,1><<<1,3>>>(powFlr);
checkCudaErrors(cudaDeviceSynchronize());
#else
indexFunctorTest<T,constFiller><<<1,3>>>(cf);
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "callin indexFunctorTest with cf const filler with value " << cf[0] << "\n";
//indexFunctorTest<T,UnaryOpIndexF><<<1,3>>>(uof);
checkCudaErrors(cudaDeviceSynchronize());
sequenceFiller<T> seqf = Functory<T,sequenceFiller>::pinch(21);
std::cout << "callin indexFunctorTest with sequenceFiller filler with value " << seqf[0] << "\n";
indexFunctorTest<T,sequenceFiller><<<1,3>>>(seqf);
checkCudaErrors(cudaDeviceSynchronize());
powFiller<T> powFlr = Functory<T,powFiller>::pinch(1.1);
std::cout << "callin indexFunctorTest with powFiller filler with value " << powFlr[0] << "\n";
indexFunctorTest<T,powFiller><<<1,3>>>(powFlr);
checkCudaErrors(cudaDeviceSynchronize());
#endif
}
template<typename T>void test1sUnaryOps() {
powUnaryOp<T> puo = Functory<T,powUnaryOp>::pinch(5);
std::cout << "callin unaryOpTest<T,1> with powUnaryOp " << puo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,powUnaryOp><<<1,10>>>(puo);
#else
unaryOpTest<T,1><<<1,10>>>(puo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
scaleUnaryOp<T> suo = Functory<T,scaleUnaryOp>::pinch(50);
std::cout << "callin unaryOpTest<T,1> with scaleUnaryOp " << suo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,scaleUnaryOp><<<1,10>>>(suo);
#else
unaryOpTest<T,1><<<1,10>>>(suo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
translationUnaryOp<T> tuo = Functory<T,translationUnaryOp>::pinch(-37.5);
std::cout << "callin unaryOpTest<T,1> with translationUnaryOp " << tuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,translationUnaryOp><<<1,10>>>(tuo);
#else
unaryOpTest<T,1><<<1,10>>>(tuo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
subFromUnaryOp<T> sfuo = Functory<T,subFromUnaryOp>::pinch(101);
std::cout << "callin unaryOpTest<T,1> with subFromUnaryOp " << sfuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,subFromUnaryOp><<<1,10>>>(sfuo);
#else
unaryOpTest<T,1><<<1,10>>>(sfuo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
ltUnaryOp<T> ltuo = Functory<T,ltUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with ltUnaryOp " << ltuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,ltUnaryOp><<<1,10>>>(ltuo);
#else
unaryOpTest<T,1><<<1,10>>>(ltuo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
lteUnaryOp<T> lteuo = Functory<T,lteUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with lteUnaryOp " << lteuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,lteUnaryOp><<<1,10>>>(lteuo);
#else
unaryOpTest<T,1><<<1,10>>>(lteuo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
gtUnaryOp<T> gtuo = Functory<T,gtUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with gtUnaryOp " << gtuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,gtUnaryOp><<<1,10>>>(gtuo);
#else
unaryOpTest<T,1><<<1,10>>>(gtuo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
gteUnaryOp<T> gteuo = Functory<T,gteUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with gteUnaryOp " << gteuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,gteUnaryOp><<<1,10>>>(gteuo);
#else
unaryOpTest<T,1><<<1,10>>>(gteuo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
eqUnaryOp<T> equo = Functory<T,eqUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with eqUnaryOp " << equo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,eqUnaryOp><<<1,10>>>(equo);
#else
unaryOpTest<T,1><<<1,10>>>(equo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
neqUnaryOp<T> nequo = Functory<T,neqUnaryOp>::pinch(2);
std::cout << "callin unaryOpTest<T,1> with neqUnaryOp " << nequo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,neqUnaryOp><<<1,10>>>(nequo);
#else
unaryOpTest<T,1><<<1,10>>>(nequo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
}
template<typename T>void test1sBinaryOps() {
multBinaryOp<T> mbo = Functory<T,multBinaryOp>::pinch();
std::cout << "callin binaryOpTest<T,1> with multBinaryOp (identity == " << mbo[0] << ")\n";
#ifdef CuMatrix_Enable_KTS
binaryOpTest<T,multBinaryOp><<<dim3(1),dim3(3,3)>>>(mbo);
#else
binaryOpTest<T,1><<<dim3(1),dim3(3,3)>>>(mbo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
plusBinaryOp<T> pbo = Functory<T,plusBinaryOp>::pinch();
std::cout << "callin binaryOpTest<T,1> with plusBinaryOp (identity == " << pbo.identity_ro() << ")\n";
#ifdef CuMatrix_Enable_KTS
binaryOpTest<T,plusBinaryOp><<<dim3(1),dim3(3,3)>>>(pbo);
#else
binaryOpTest<T,1><<<dim3(1),dim3(3,3)>>>(pbo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
}
template<typename T>void test2sFillers() {
sequenceFiller<T> seqf = Functory<T,sequenceFiller>::pinch(10,2);
std::cout << "callin indexFunctorTest with sequenceFiller filler with start " << seqf[0] << " and step " << seqf[1] << "\n";
#ifndef CuMatrix_Enable_KTS
indexFunctorTest<T,2><<<1,10>>>(seqf); // 5 rows
#else
indexFunctorTest<T,sequenceFiller><<<1,10>>>(seqf); // 5 rows
#endif
checkCudaErrors(cudaDeviceSynchronize());
increasingColumnsFiller<T> icf = Functory<T,increasingColumnsFiller>::pinch(10,2);
std::cout << "callin indexFunctorTest<T,2> with increasingColumnsFiller with start " << icf[0] << " and width " << icf[1] << " cols \n";
#ifndef CuMatrix_Enable_KTS
indexFunctorTest<T,2><<<1,10>>>(icf); // 5 rows
#else
indexFunctorTest<T,increasingColumnsFiller><<<1,10>>>(icf); // 5 rows
#endif
checkCudaErrors(cudaDeviceSynchronize());
increasingRowsFiller<T> irf = Functory<T,increasingRowsFiller>::pinch(5, 5);
std::cout << "callin indexFunctorTest<T,2> with increasingRowsFiller with start " << irf[0] << " and height " << irf[1] << " rows \n";
#ifndef CuMatrix_Enable_KTS
indexFunctorTest<T,2><<<1,10>>>(irf);
#else
indexFunctorTest<T,increasingRowsFiller><<<1,10>>>(irf);
#endif
checkCudaErrors(cudaDeviceSynchronize());
seqModFiller<T> smf = Functory<T,seqModFiller>::pinch(5, 5);
std::cout << "callin indexFunctorTest<T,2> with seqModFiller with phase " << smf[0] << " and mod " << smf[1] << "\n";
#ifndef CuMatrix_Enable_KTS
indexFunctorTest<T,2><<<1,10>>>(smf);
#else
indexFunctorTest<T,seqModFiller><<<1,10>>>(smf);
#endif
checkCudaErrors(cudaDeviceSynchronize());
diagonalFiller<T> dgf = Functory<T,diagonalFiller>::pinch(5, 10);
std::cout << "callin indexFunctorTest<T,2> with diagonalFiller with value " << dgf.value_ro() << " and dim " << dgf[1] << "\n";
#ifndef CuMatrix_Enable_KTS
indexFunctorTest<T,2><<<1,dim3(10,10,1)>>>(dgf);
#else
indexFunctorTest<T,diagonalFiller><<<1,dim3(10,10,1)>>>(dgf);
#endif
checkCudaErrors(cudaDeviceSynchronize());
}
template<typename T>void test2sUnaryOps() {
almostEqUnaryOp<T> aeuo = Functory<T,almostEqUnaryOp>::pinch(static_cast<T>(0),static_cast<T>(2));
std::cout << "callin unaryOpTest<T,2> with almostEqUnaryOp " << aeuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,almostEqUnaryOp><<<1,10>>>(aeuo);
#else
unaryOpTest<T,2><<<1,10>>>(aeuo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
notAlmostEqUnaryOp<T> naeuo = Functory<T,notAlmostEqUnaryOp>::pinch(static_cast<T>(0),2);
std::cout << "callin unaryOpTest<T,2> with notAlmostEqUnaryOp " << naeuo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,notAlmostEqUnaryOp><<<1,10>>>(naeuo);
#else
unaryOpTest<T,2><<<1,10>>>(naeuo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
}
template<typename T>void test3sUnaryOps() {
idx1DblockAlmostEqUnaryOp<T> ibauo = Functory<T,idx1DblockAlmostEqUnaryOp>::pinch(static_cast<T>(0),static_cast<T>(2),static_cast<T>(4));
std::cout << "callin idx1DblockAlmostEqUnaryOp<T,3> with idx1DblockAlmostEqUnaryOp " << ibauo[0] << "\n";
#ifdef CuMatrix_Enable_KTS
unaryOpTest<T,idx1DblockAlmostEqUnaryOp><<<1,10>>>(ibauo);
#else
unaryOpTest<T,3><<<1,10>>>(ibauo);
#endif
checkCudaErrors(cudaDeviceSynchronize());
}
template<typename T>void test3sFillers() {
sinFiller<T> sf = Functory<T,sinFiller>::pinch(20,3,10);
cosFiller<T> cf = Functory<T,cosFiller>::pinch(20,3,10);
/*
or more readably
sf.phase() = 10;
sf.amplitude() = 20;
sf.period() =3;
*/
std::cout << "sf.ampl " << sf[0] << "\n";
std::cout << "callin indexFunctorTest<T,3> with sin filler\n";
#ifdef CuMatrix_Enable_KTS
indexFunctorTest<T,sinFiller><<<1,3>>>(sf);
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "cf.ampl " << cf.amplitude_ro() << "\n";
#else
indexFunctorTest<T,3><<<1,3>>>(sf);
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "cf.ampl " << cf.amplitude_ro() << "\n";
#endif
std::cout << "cf[1] " << cf[1] << "\n";
std::cout << "callin indexFunctorTest<T,3> with cos filler\n";
#ifdef CuMatrix_Enable_KTS
indexFunctorTest<T,cosFiller><<<1,3>>>(cf);
#else
indexFunctorTest<T,3><<<1,3>>>(cf);
#endif
checkCudaErrors(cudaDeviceSynchronize());
}
int cuFunctorMain() {
int device;
checkCudaErrors(cudaGetDevice(&device));
#ifndef CuMatrix_Enable_KTS
#ifdef CuMatrix_StatFunc
unaryOpIndexMbrs<float>::setupAllFunctionTables(device);
#else
unaryOpIndexMbrs<float>::setupAllMethodTables(device);
#endif
#endif
testGets();
test0sFillers<float>();
test1sFillers<float>();
test2sFillers<float>();
test2sUnaryOps<float>();
test3sFillers<float>();
test0sUnaryOps<float>();
test1sUnaryOps<float>();
test1sBinaryOps<float>();
return 0;
}
|
d250adccdc65ee459c2d725b3e173b8bb98dbc3d.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Copyright 2018 XIAOLIN WANG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "Global.h"
namespace cytonLib
{
Global::Global()
{
os=NULL;
one=1.0;
zero=0.0;
batch=0;
initFactor=0.1;
workSpace=NULL;
workSpaceSize=0;
}
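// grow the shared device scratch buffer on demand; it is only reallocated when a larger size is requested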
void Global::ensureWorkSpace(int size)
{
if(workSpaceSize<size)
{
if(size!=0)
{
checkError(hipFree(workSpace));
}
workSpaceSize=size;
checkError(hipMalloc(&workSpace,workSpaceSize) );
}
}
bool testMode=false;
int batchSize=64;
int blockSize=128;
int blockSize2d=16;
bool random_=false;
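// select device 0, create the cuDNN / cuBLAS / cuRAND handles, and seed the random generators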
void Global::init()
{
if(os==NULL)
{
os=&std::cout;
}
hipSetDevice(0);
hipDeviceSynchronize();
hipDeviceSynchronize();
cudnnStatus_t tCudnn = cudnnCreate(&cudnnHandle);
checkError(tCudnn);
hipblasStatus_t tCublas = hipblasCreate(&cublasHandle);
checkError(tCublas);
hiprandStatus_t tCurand=hiprandCreateGenerator(&curandGenerator, HIPRAND_RNG_PSEUDO_DEFAULT);
checkError(tCurand);
srand (time(NULL));
unsigned long long t=rand();
t=rand();
rnnDropoutSeed = t;
t=rand();
checkError(hiprandSetPseudoRandomGeneratorSeed(curandGenerator, t));
}
template<>
double* Global::onesFD(int size)
{
DevMatReal<double>* res=&onesDouble;
if(res->length()<size)
{
res->resize(size, 1);
res->setValue(1.0);
}
return res->data;
}
template<>
float* Global::onesFD(int size)
{
DevMatReal<float>* res=&onesFloat;
if(res->length()<size)
{
res->resize(size, 1);
res->setValue(1.0);
}
return res->data;
}
Precision* Global::ones(int size)
{
DevMatPrec* res=&ones_;
if(res->length()<size)
{
res->resize(size, 1);
res->setValue(1.0);
}
return res->data;
}
void Global::end()
{
ones_.freeData();
}
Global global;
} /* namespace cytonLib */
|
d250adccdc65ee459c2d725b3e173b8bb98dbc3d.cu
|
/*
Copyright 2018 XIAOLIN WANG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "Global.h"
namespace cytonLib
{
Global::Global()
{
os=NULL;
one=1.0;
zero=0.0;
batch=0;
initFactor=0.1;
workSpace=NULL;
workSpaceSize=0;
}
void Global::ensureWorkSpace(int size)
{
if(workSpaceSize<size)
{
if(size!=0)
{
checkError(cudaFree(workSpace));
}
workSpaceSize=size;
checkError(cudaMalloc(&workSpace,workSpaceSize) );
}
}
bool testMode=false;
int batchSize=64;
int blockSize=128;
int blockSize2d=16;
bool random_=false;
void Global::init()
{
if(os==NULL)
{
os=&std::cout;
}
cudaSetDevice(0);
cudaDeviceSynchronize();
cudaThreadSynchronize();
cudnnStatus_t tCudnn = cudnnCreate(&cudnnHandle);
checkError(tCudnn);
cublasStatus_t tCublas = cublasCreate(&cublasHandle);
checkError(tCublas);
curandStatus_t tCurand=curandCreateGenerator(&curandGenerator, CURAND_RNG_PSEUDO_DEFAULT);
checkError(tCurand);
srand (time(NULL));
unsigned long long t=rand();
t=rand();
rnnDropoutSeed = t;
t=rand();
checkError(curandSetPseudoRandomGeneratorSeed(curandGenerator, t));
}
template<>
double* Global::onesFD(int size)
{
DevMatReal<double>* res=&onesDouble;
if(res->length()<size)
{
res->resize(size, 1);
res->setValue(1.0);
}
return res->data;
}
template<>
float* Global::onesFD(int size)
{
DevMatReal<float>* res=&onesFloat;
if(res->length()<size)
{
res->resize(size, 1);
res->setValue(1.0);
}
return res->data;
}
Precision* Global::ones(int size)
{
DevMatPrec* res=&ones_;
if(res->length()<size)
{
res->resize(size, 1);
res->setValue(1.0);
}
return res->data;
}
void Global::end()
{
ones_.freeData();
}
Global global;
} /* namespace cytonLib */
|
6c46306f58763152ee72f914dbb094875bf2d126.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Extended for use in CS 374 at Calvin College by Joel C. Adams.
* Edited by Bryce Allen for proj09, cs374, Calvin University.
*/
/**
* Vector square: C = A * A.
*
* This sample is a very basic sample that implements element by element
* vector square. It is adapted from the vector-add sample illustrating Chapter 2
* of the programming guide, with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
// for timing
#include <omp.h>
#include <math.h>
/**
* CUDA Kernel Device code
*
* Computes the element-wise square of A into C.
* Both vectors have the same number of elements, numElements.
*/
__global__
void vectorSquare(const float *A, float *C, unsigned long numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = (A[i] * A[i]);
}
}
void checkErr(hipError_t err, const char* msg)
{
if (err != hipSuccess)
{
fprintf(stderr, "%s (error code %d: '%s')!\n", msg, err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/**
* Host main routine
*/
int main(int argc, char** argv)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
double startTime, endTime, totalTime = 0.0;
// Print the vector length to be used, and compute its size
unsigned long numElements = 50000;
if (argc == 2) {
numElements = strtoul( argv[1] , 0, 10 );
}
size_t size = numElements * sizeof(float);
printf("[Vector square of %lu elements]\n", numElements);
printf("\n");
// Allocate the host input vectors A & B
float * h_A = (float *)malloc(size);
//float * h_B = (float *)malloc(size);
// Allocate the host output vector C
float * h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || /*h_B == NULL ||*/ h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
//h_B[i] = rand()/(float)RAND_MAX;
}
// 1a. Allocate the device input vectors A & B
float * d_A = NULL;
err = hipMalloc((void **)&d_A, size);
checkErr(err, "Failed to allocate device vector A");
// float * d_B = NULL;
// err = hipMalloc((void **)&d_B, size);
// checkErr(err, "Failed to allocate device vector B");
// 1.b. Allocate the device output vector C
float * d_C = NULL;
err = hipMalloc((void **)&d_C, size);
checkErr(err, "Failed to allocate device vector C");
// 2. Copy the host input vectors A and B in host memory
// to the device input vectors in device memory
//printf("Copy input data from the host memory to the CUDA device\n");
startTime = omp_get_wtime();
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
checkErr(err, "Failed to copy device vector A from host to device");
//err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
//checkErr(err, "Failed to copy device vector B from host to device");
endTime = omp_get_wtime() - startTime;
printf("Copy time was %f\n", endTime);
totalTime += endTime;
startTime = 0.0;
endTime = 0.0;
// 3. Launch the vectorSquare CUDA Kernel
int threadsPerBlock = 256;
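// round up: the last block may be only partially filled, but every element gets a thread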
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
//printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
startTime = omp_get_wtime();
hipLaunchKernelGGL(( vectorSquare), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_C, numElements);
endTime = omp_get_wtime() - startTime;
err = hipGetLastError();
checkErr(err, "Failed to launch vectorAdd kernel");
printf("square time was %f\n", endTime);
totalTime += endTime;
startTime = 0.0;
endTime = 0.0;
// 4. Copy the device result vector in device memory
// to the host result vector in host memory.
//printf("Copy output data from the CUDA device to the host memory\n");
startTime = omp_get_wtime();
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
endTime = omp_get_wtime() - startTime;
checkErr(err, "Failed to copy vector C from device to host");
printf("Copy-back time was %f\n", endTime);
totalTime += endTime;
startTime = 0.0;
endTime = 0.0;
printf("total computation time was %f\n", totalTime);
printf("\n");
// Verify that the result vector is correct
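    // Note: exact floating-point equality is unreliable here, so the check below
    // compares the squared input against the GPU result with a small tolerance.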
for (int i = 0; i < numElements; ++i)
{
        if (fabs(h_A[i] * h_A[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("CUDA test PASSED\n");
//printf("CUDA time: %lf\n", stopTime-startTime);
// Free device global memory
err = hipFree(d_A);
checkErr(err, "Failed to free device vector A");
//err = hipFree(d_B);
checkErr(err, "Failed to free device vector B");
err = hipFree(d_C);
checkErr(err, "Failed to free device vector C");
// repeat the computation sequentially
startTime = omp_get_wtime();
for (int i = 0; i < numElements; ++i)
{
        h_C[i] = h_A[i] * h_A[i];
}
endTime = omp_get_wtime() - startTime;
printf("total sequential computation time was %f\n", endTime);
// verify again
for (int i = 0; i < numElements; ++i)
{
        if (fabs(sqrt(h_C[i]) - h_A[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("\nNormal test PASSED\n");
// printf("Normal time: %lf\n", stopTime-startTime);
// Free host memory
free(h_A);
//free(h_B);
free(h_C);
// Reset the device and exit
err = hipDeviceReset();
checkErr(err, "Unable to reset device");
printf("Done\n");
return 0;
}
|
6c46306f58763152ee72f914dbb094875bf2d126.cu
|
/**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Extended for use in CS 374 at Calvin College by Joel C. Adams.
* Edited by Bryce Allen for proj09, cs374, Calvin University.
*/
/**
 * Vector square: C = A * A.
*
 * This sample is a very basic sample that implements element-by-element
 * vector squaring. It is based on the vector addition sample illustrating Chapter 2
 * of the programming guide, with the addition of error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
// for timing
#include <omp.h>
#include <math.h>
/**
* CUDA Kernel Device code
*
 * Computes the element-wise square of vector A into C.
 * Both vectors have the same number of elements numElements.
*/
__global__
void vectorSquare(const float *A, float *C, unsigned long numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = (A[i] * A[i]);
}
}
void checkErr(cudaError_t err, const char* msg)
{
if (err != cudaSuccess)
{
fprintf(stderr, "%s (error code %d: '%s')!\n", msg, err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/**
* Host main routine
*/
int main(int argc, char** argv)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
double startTime, endTime, totalTime = 0.0;
// Print the vector length to be used, and compute its size
unsigned long numElements = 50000;
if (argc == 2) {
numElements = strtoul( argv[1] , 0, 10 );
}
size_t size = numElements * sizeof(float);
printf("[Vector square of %lu elements]\n", numElements);
printf("\n");
// Allocate the host input vectors A & B
float * h_A = (float *)malloc(size);
//float * h_B = (float *)malloc(size);
// Allocate the host output vector C
float * h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || /*h_B == NULL ||*/ h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
//h_B[i] = rand()/(float)RAND_MAX;
}
// 1a. Allocate the device input vectors A & B
float * d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
checkErr(err, "Failed to allocate device vector A");
// float * d_B = NULL;
// err = cudaMalloc((void **)&d_B, size);
// checkErr(err, "Failed to allocate device vector B");
// 1.b. Allocate the device output vector C
float * d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
checkErr(err, "Failed to allocate device vector C");
// 2. Copy the host input vectors A and B in host memory
// to the device input vectors in device memory
//printf("Copy input data from the host memory to the CUDA device\n");
startTime = omp_get_wtime();
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
checkErr(err, "Failed to copy device vector A from host to device");
//err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
//checkErr(err, "Failed to copy device vector B from host to device");
endTime = omp_get_wtime() - startTime;
printf("Copy time was %f\n", endTime);
totalTime += endTime;
startTime = 0.0;
endTime = 0.0;
// 3. Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
//printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
startTime = omp_get_wtime();
vectorSquare<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, numElements);
endTime = omp_get_wtime() - startTime;
err = cudaGetLastError();
checkErr(err, "Failed to launch vectorAdd kernel");
printf("square time was %f\n", endTime);
totalTime += endTime;
startTime = 0.0;
endTime = 0.0;
// 4. Copy the device result vector in device memory
// to the host result vector in host memory.
//printf("Copy output data from the CUDA device to the host memory\n");
startTime = omp_get_wtime();
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
endTime = omp_get_wtime() - startTime;
checkErr(err, "Failed to copy vector C from device to host");
printf("Copy-back time was %f\n", endTime);
totalTime += endTime;
startTime = 0.0;
endTime = 0.0;
printf("total computation time was %f\n", totalTime);
printf("\n");
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
        if (fabs(h_A[i] * h_A[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("CUDA test PASSED\n");
//printf("CUDA time: %lf\n", stopTime-startTime);
// Free device global memory
err = cudaFree(d_A);
checkErr(err, "Failed to free device vector A");
//err = cudaFree(d_B);
checkErr(err, "Failed to free device vector B");
err = cudaFree(d_C);
checkErr(err, "Failed to free device vector C");
// repeat the computation sequentially
startTime = omp_get_wtime();
for (int i = 0; i < numElements; ++i)
{
        h_C[i] = h_A[i] * h_A[i];
}
endTime = omp_get_wtime() - startTime;
printf("total sequential computation time was %f\n", endTime);
// verify again
for (int i = 0; i < numElements; ++i)
{
        if (fabs(sqrt(h_C[i]) - h_A[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("\nNormal test PASSED\n");
// printf("Normal time: %lf\n", stopTime-startTime);
// Free host memory
free(h_A);
//free(h_B);
free(h_C);
// Reset the device and exit
err = cudaDeviceReset();
checkErr(err, "Unable to reset device");
printf("Done\n");
return 0;
}
|
0764170e723ea6df1f6bf81ea06b3e8b98f70793.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <torch/extension.h>
#include "types.h"
#ifndef MAX_THREAD_NUM
#define MAX_THREAD_NUM 1024
#endif // MAX_THREAD_NUM
namespace fastpatch {
namespace {
#define CHECK_RUNTIME_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
} // namespace
__global__ void feat_forward_kernel(int maxsize, int Cin,
const int* __restrict__ nn_offset, const int* __restrict__ nn_list, const float* __restrict__ feat_data,
float* __restrict__ patchfeat_data) {
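  // Layout (as launched from feat_forward below): one block per output point u,
  // threadIdx.y picks the channel in [0, Cin) and threadIdx.x strides over the
  // neighbours of u, so each thread copies one (neighbour, channel) entry.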
int u = blockIdx.x; // bi
int N_RW = blockDim.x;
int PATCH_STRIDE = maxsize * Cin;
int Ns = nn_offset[u + 1] - nn_offset[u];
const int* nn = nn_list + nn_offset[u]; // Ns
int ti = threadIdx.x;
int tj = threadIdx.y;
for (int i = ti; i < Ns && i < maxsize; i += N_RW) {
int v = nn[i];
patchfeat_data[u * PATCH_STRIDE + i * Cin + tj] = feat_data[v * Cin + tj];
}
}
__global__ void feat_backward_kernel(int maxsize, int Cin,
const int* __restrict__ grad_nn_offset, const int* __restrict__ grad_nn_list,
const float* __restrict__ grad_patchfeat, float* __restrict__ grad_feat) {
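  // Each entry of grad_nn_list is a (patch v, slot v_offset) pair recording where
  // this point's feature was gathered in the forward pass; since several patches may
  // reference the same feature, gradients are accumulated with atomicAdd below.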
int u = blockIdx.x; // bi
int N_RW = blockDim.x;
int PATCH_STRIDE = maxsize * Cin;
int Ns = grad_nn_offset[u + 1] - grad_nn_offset[u];
const int* grad_nn = grad_nn_list + grad_nn_offset[u] * 2; // Ns x 2
int ti = threadIdx.x;
int tj = threadIdx.y;
for (int i = ti; i < Ns && i < maxsize; i += N_RW) {
int v = grad_nn[i * 2];
int v_offset = grad_nn[i * 2 + 1];
// printf("u, Ns, i, v, v_offset = %d %d %d %d %d\n", u, Ns, i, v, v_offset);
// use atomicAdd instead of +=
atomicAdd(grad_feat + u * Cin + tj, grad_patchfeat[v * PATCH_STRIDE + v_offset * Cin + tj]);
}
}
// feat: N x Cin x 1
// offset: N + 1
// nnlist: squeeze(N x Ns)
torch::Tensor feat_forward(torch::Tensor feat, torch::Tensor nn_offset, torch::Tensor nn_list, int maxsize) {
  // Gather per-point neighbour features into a dense patch tensor on the GPU
CHECK_CUDA(feat);
CHECK_CUDA(nn_offset);
CHECK_CUDA(nn_list);
int N = torch::size(nn_offset, 0) - 1;
int Cin = torch::size(feat, 1);
torch::Tensor patchfeat = torch::zeros({N, maxsize, Cin, 1}, feat.options());
int n_rw = MAX_THREAD_NUM / Cin;
const dim3 block(n_rw, Cin);
const dim3 grid(N);
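  // Launch sketch: one block per point with n_rw x Cin threads (<= MAX_THREAD_NUM);
  // this assumes 0 < Cin <= MAX_THREAD_NUM, otherwise a different mapping is needed.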
hipLaunchKernelGGL(( feat_forward_kernel), dim3(grid), dim3(block), 0, 0,
maxsize, Cin, nn_offset.data_ptr<int>(), nn_list.data_ptr<int>(),
feat.data_ptr<float>(), patchfeat.data_ptr<float>());
CHECK_RUNTIME_ERROR(hipPeekAtLastError());
return patchfeat;
}
torch::Tensor feat_backward(
torch::Tensor grad_patchfeat, torch::Tensor grad_nn_offset, torch::Tensor grad_nn_list, int maxsize) {
CHECK_CUDA(grad_patchfeat);
CHECK_CUDA(grad_nn_offset);
CHECK_CUDA(grad_nn_list);
int N = torch::size(grad_nn_offset, 0) - 1;
int Cin = torch::size(grad_patchfeat, 2); // N x maxsize x Cin x 1
torch::Tensor grad_feat = torch::zeros({N, Cin, 1}, grad_patchfeat.options());
int n_rw = MAX_THREAD_NUM / Cin;
const dim3 block(n_rw, Cin);
const dim3 grid(N);
hipLaunchKernelGGL(( feat_backward_kernel), dim3(grid), dim3(block), 0, 0,
maxsize, Cin,
grad_nn_offset.data_ptr<int>(), grad_nn_list.data_ptr<int>(),
grad_patchfeat.data_ptr<float>(), grad_feat.data_ptr<float>());
CHECK_RUNTIME_ERROR(hipPeekAtLastError());
return grad_feat;
}
__global__ void get_selection_mat_kernel(int maxsize, int S,
const int* __restrict__ nn_offset, const float* __restrict__ nw_list, float* __restrict__ select_mat) {
int u = blockIdx.x; // bi
int N_RW = blockDim.x;
int STRIDE = maxsize * S;
int Ns = nn_offset[u + 1] - nn_offset[u];
const float* nw = nw_list + nn_offset[u] * S; // nw_list N x Ns x S
int ti = threadIdx.x;
int tj = threadIdx.y;
for (int v = ti; v < Ns && v < maxsize; v += N_RW) {
select_mat[u * STRIDE + v * S + tj] = nw[v * S + tj];
}
}
torch::Tensor get_selection_mat(torch::Tensor nn_offset, torch::Tensor nw_list, int maxsize, int S) {
CHECK_CUDA(nn_offset);
CHECK_CUDA(nw_list);
int N = torch::size(nn_offset, 0) - 1;
torch::Tensor select_mat = torch::zeros({N, maxsize, 1, S}, nw_list.options());
int n_rw = MAX_THREAD_NUM / S;
const dim3 block(n_rw, S);
const dim3 grid(N);
hipLaunchKernelGGL(( get_selection_mat_kernel), dim3(grid), dim3(block), 0, 0,
maxsize, S, nn_offset.data_ptr<int>(), nw_list.data_ptr<float>(),
select_mat.data_ptr<float>());
CHECK_RUNTIME_ERROR(hipPeekAtLastError());
return select_mat;
}
// std::pair<torch::Tensor, torch::Tensor> get_grad_nn_list(torch::Tensor nn_offset, torch::Tensor nn_list) {
// int N = torch::size(nn_offset, 0) - 1;
// std::vector<std::vector<int>> grad_nn_v(N);
// std::vector<std::vector<int>> grad_v_offset(N);
// const int* nn_offset_ptr = nn_offset.data_ptr<int>();
// const int* nn_list_ptr = nn_list.data_ptr<int>();
// for (int u = 0; u < N; ++u) {
// int Ns = nn_offset_ptr[u + 1] - nn_offset_ptr[u];
// const int* nn = nn_list_ptr + nn_offset_ptr[u];
// for (int j = 0; j < Ns; ++j) {
// grad_nn_v[nn[j]].push_back(u);
// grad_v_offset[nn[j]].push_back(j);
// }
// }
// torch::Tensor grad_nn_offset = torch::zeros_like(nn_offset);
// int* grad_nn_offset_ptr = grad_nn_offset.data_ptr<int>();
// for (int i = 1; i <= N; ++i) {
// grad_nn_offset_ptr[i] = grad_nn_offset_ptr[i - 1] + grad_nn_v[i - 1].size();
// }
// torch::Tensor grad_nn_list = torch::zeros(grad_nn_offset_ptr[N] * 2, nn_list.options()); // N x Ns x 2
// int* grad_nn_list_ptr = grad_nn_list.data_ptr<int>();
// for (int u = 0; u < N; ++u) {
// int start = grad_nn_offset_ptr[u];
// int Ns = grad_nn_offset_ptr[u + 1] - grad_nn_offset_ptr[u];
// for (int i = 0; i < Ns; ++i) {
// grad_nn_list_ptr[(start + i) * 2] = grad_nn_v[u][i];
// grad_nn_list_ptr[(start + i) * 2 + 1] = grad_v_offset[u][i];
// }
// }
// return {grad_nn_offset, grad_nn_list};
// }
} // namespace fastpatch
|
0764170e723ea6df1f6bf81ea06b3e8b98f70793.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "types.h"
#ifndef MAX_THREAD_NUM
#define MAX_THREAD_NUM 1024
#endif // MAX_THREAD_NUM
namespace fastpatch {
namespace {
#define CHECK_RUNTIME_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
} // namespace
__global__ void feat_forward_kernel(int maxsize, int Cin,
const int* __restrict__ nn_offset, const int* __restrict__ nn_list, const float* __restrict__ feat_data,
float* __restrict__ patchfeat_data) {
int u = blockIdx.x; // bi
int N_RW = blockDim.x;
int PATCH_STRIDE = maxsize * Cin;
int Ns = nn_offset[u + 1] - nn_offset[u];
const int* nn = nn_list + nn_offset[u]; // Ns
int ti = threadIdx.x;
int tj = threadIdx.y;
for (int i = ti; i < Ns && i < maxsize; i += N_RW) {
int v = nn[i];
patchfeat_data[u * PATCH_STRIDE + i * Cin + tj] = feat_data[v * Cin + tj];
}
}
__global__ void feat_backward_kernel(int maxsize, int Cin,
const int* __restrict__ grad_nn_offset, const int* __restrict__ grad_nn_list,
const float* __restrict__ grad_patchfeat, float* __restrict__ grad_feat) {
int u = blockIdx.x; // bi
int N_RW = blockDim.x;
int PATCH_STRIDE = maxsize * Cin;
int Ns = grad_nn_offset[u + 1] - grad_nn_offset[u];
const int* grad_nn = grad_nn_list + grad_nn_offset[u] * 2; // Ns x 2
int ti = threadIdx.x;
int tj = threadIdx.y;
for (int i = ti; i < Ns && i < maxsize; i += N_RW) {
int v = grad_nn[i * 2];
int v_offset = grad_nn[i * 2 + 1];
// printf("u, Ns, i, v, v_offset = %d %d %d %d %d\n", u, Ns, i, v, v_offset);
// use atomicAdd instead of +=
atomicAdd(grad_feat + u * Cin + tj, grad_patchfeat[v * PATCH_STRIDE + v_offset * Cin + tj]);
}
}
// feat: N x Cin x 1
// offset: N + 1
// nnlist: squeeze(N x Ns)
torch::Tensor feat_forward(torch::Tensor feat, torch::Tensor nn_offset, torch::Tensor nn_list, int maxsize) {
  // Gather per-point neighbour features into a dense patch tensor on the GPU
CHECK_CUDA(feat);
CHECK_CUDA(nn_offset);
CHECK_CUDA(nn_list);
int N = torch::size(nn_offset, 0) - 1;
int Cin = torch::size(feat, 1);
torch::Tensor patchfeat = torch::zeros({N, maxsize, Cin, 1}, feat.options());
int n_rw = MAX_THREAD_NUM / Cin;
const dim3 block(n_rw, Cin);
const dim3 grid(N);
feat_forward_kernel<<<grid, block>>>(
maxsize, Cin, nn_offset.data_ptr<int>(), nn_list.data_ptr<int>(),
feat.data_ptr<float>(), patchfeat.data_ptr<float>());
CHECK_RUNTIME_ERROR(cudaPeekAtLastError());
return patchfeat;
}
torch::Tensor feat_backward(
torch::Tensor grad_patchfeat, torch::Tensor grad_nn_offset, torch::Tensor grad_nn_list, int maxsize) {
CHECK_CUDA(grad_patchfeat);
CHECK_CUDA(grad_nn_offset);
CHECK_CUDA(grad_nn_list);
int N = torch::size(grad_nn_offset, 0) - 1;
int Cin = torch::size(grad_patchfeat, 2); // N x maxsize x Cin x 1
torch::Tensor grad_feat = torch::zeros({N, Cin, 1}, grad_patchfeat.options());
int n_rw = MAX_THREAD_NUM / Cin;
const dim3 block(n_rw, Cin);
const dim3 grid(N);
feat_backward_kernel<<<grid, block>>>(
maxsize, Cin,
grad_nn_offset.data_ptr<int>(), grad_nn_list.data_ptr<int>(),
grad_patchfeat.data_ptr<float>(), grad_feat.data_ptr<float>());
CHECK_RUNTIME_ERROR(cudaPeekAtLastError());
return grad_feat;
}
__global__ void get_selection_mat_kernel(int maxsize, int S,
const int* __restrict__ nn_offset, const float* __restrict__ nw_list, float* __restrict__ select_mat) {
int u = blockIdx.x; // bi
int N_RW = blockDim.x;
int STRIDE = maxsize * S;
int Ns = nn_offset[u + 1] - nn_offset[u];
const float* nw = nw_list + nn_offset[u] * S; // nw_list N x Ns x S
int ti = threadIdx.x;
int tj = threadIdx.y;
for (int v = ti; v < Ns && v < maxsize; v += N_RW) {
select_mat[u * STRIDE + v * S + tj] = nw[v * S + tj];
}
}
torch::Tensor get_selection_mat(torch::Tensor nn_offset, torch::Tensor nw_list, int maxsize, int S) {
CHECK_CUDA(nn_offset);
CHECK_CUDA(nw_list);
int N = torch::size(nn_offset, 0) - 1;
torch::Tensor select_mat = torch::zeros({N, maxsize, 1, S}, nw_list.options());
int n_rw = MAX_THREAD_NUM / S;
const dim3 block(n_rw, S);
const dim3 grid(N);
get_selection_mat_kernel<<<grid, block>>>(
maxsize, S, nn_offset.data_ptr<int>(), nw_list.data_ptr<float>(),
select_mat.data_ptr<float>());
CHECK_RUNTIME_ERROR(cudaPeekAtLastError());
return select_mat;
}
// std::pair<torch::Tensor, torch::Tensor> get_grad_nn_list(torch::Tensor nn_offset, torch::Tensor nn_list) {
// int N = torch::size(nn_offset, 0) - 1;
// std::vector<std::vector<int>> grad_nn_v(N);
// std::vector<std::vector<int>> grad_v_offset(N);
// const int* nn_offset_ptr = nn_offset.data_ptr<int>();
// const int* nn_list_ptr = nn_list.data_ptr<int>();
// for (int u = 0; u < N; ++u) {
// int Ns = nn_offset_ptr[u + 1] - nn_offset_ptr[u];
// const int* nn = nn_list_ptr + nn_offset_ptr[u];
// for (int j = 0; j < Ns; ++j) {
// grad_nn_v[nn[j]].push_back(u);
// grad_v_offset[nn[j]].push_back(j);
// }
// }
// torch::Tensor grad_nn_offset = torch::zeros_like(nn_offset);
// int* grad_nn_offset_ptr = grad_nn_offset.data_ptr<int>();
// for (int i = 1; i <= N; ++i) {
// grad_nn_offset_ptr[i] = grad_nn_offset_ptr[i - 1] + grad_nn_v[i - 1].size();
// }
// torch::Tensor grad_nn_list = torch::zeros(grad_nn_offset_ptr[N] * 2, nn_list.options()); // N x Ns x 2
// int* grad_nn_list_ptr = grad_nn_list.data_ptr<int>();
// for (int u = 0; u < N; ++u) {
// int start = grad_nn_offset_ptr[u];
// int Ns = grad_nn_offset_ptr[u + 1] - grad_nn_offset_ptr[u];
// for (int i = 0; i < Ns; ++i) {
// grad_nn_list_ptr[(start + i) * 2] = grad_nn_v[u][i];
// grad_nn_list_ptr[(start + i) * 2 + 1] = grad_v_offset[u][i];
// }
// }
// return {grad_nn_offset, grad_nn_list};
// }
} // namespace fastpatch
|
a5f8995832f47bbbec09aa67bd54f5fc9a5fd232.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "trove_objects.h"
#include "cuda_objects.cuh"
#include "cuda_host.cuh"
#include "rocblas.h"
#include "Util.h"
#include <cstdio>
#include <cstdlib>
#include <omp.h>
const size_t max_dipole_size = 5l*1024l*1024l*1024l;
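// Dipole matrices above this size (5 GiB) are not kept resident on the GPU; they are
// read in blocks and streamed during the half-linestrength step (see do_1st_half_ls_blocks).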
double pi = 4.0 * atan2(1.0,1.0);
double A_coef_s_1 = 64.0*pow(10.0,-36.0) * pow(pi,4.0) / (3.0 * 6.62606896*pow(10.0,-27.0));
double planck = 6.62606896*pow(10.0,-27.0);
double avogno = 6.0221415*pow(10.0,23.0);
double vellgt = 2.99792458*pow(10.0,10.0);
double intens_cm_mol = 8.0*pow(10.0,-36.0) * pow(pi,3.0)*avogno/(3.0*planck*vellgt);
double boltz = 1.380658*pow(10.0,-16.0);
//beta = planck * vellgt / (boltz * intensity%temperature)
void CheckCudaError(const char* tag){
// check for error
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("[%s] CUDA error: %s\n", tag,hipGetErrorString(error));
exit(-1);
}
}
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(hipblasStatus_t error)
{
switch (error)
{
case HIPBLAS_STATUS_SUCCESS:
return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED:
return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED:
return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE:
return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH:
return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR:
return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED:
return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR:
return "HIPBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
// Print device properties
void printDevProp(hipDeviceProp_t devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
void get_cuda_info(FintensityJob & intensity){
int devCount;
hipGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printDevProp(devProp);
} // Iterate through devices
}
int count_free_devices(){
int devCount;
int free_devices=0;
hipGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
hipSetDevice(i);
if(hipFree(0)==hipSuccess){
free_devices++;
hipDeviceReset();
}
} // Iterate through devices
return free_devices;
}
int get_free_device(int last){
int device_id=-1;
last++;
int devCount;
hipGetDeviceCount(&devCount);
for(int i=last; i< devCount; i++){
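		// hipFree(0) is used here as a cheap probe: it forces a context to be created on
		// device i, so a failure is taken to mean the device is busy or unavailable.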
hipSetDevice(i);
if(hipFree(0)==hipSuccess){
hipDeviceReset();
return i;
}
}
return -1;
}
__host__ void copy_dipole_host(double* dipole_me,double** dipole_me_host,size_t & dip_size)
{
printf("Alloc");
hipHostMalloc(dipole_me_host,dip_size); //Malloc to pinned memory
printf("memcpy");
memcpy(dipole_me_host,dipole_me,dip_size);
}
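// Helper: allocates arr_size bytes on the current device, copies arr into the new
// buffer and returns the device pointer through *arr_gpu; prints arr_name and exits
// on any allocation or copy failure.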
__host__ void copy_array_to_gpu(void* arr,void** arr_gpu,size_t arr_size,const char* arr_name)
{
//Malloc dipole
if(hipSuccess != hipMalloc(arr_gpu,arr_size))
{
fprintf(stderr,"[copy_array_to_gpu]: couldn't malloc for %s \n",arr_name);
CheckCudaError(arr_name);
exit(0);
}
if(hipSuccess != hipMemcpy((*arr_gpu),arr,arr_size,hipMemcpyHostToDevice))
{
fprintf(stderr,"[copy_array_to_gpu]: error copying %s \n",arr_name);
exit(0);
}
};
//Copies relevant information needed to do intensity calculations onto the gpu
//Arguments p1: The bset_contr to copy p2: A device memory pointer to copy to
//Returns how much memory was used in bytes
__host__ size_t copy_bset_contr_to_gpu(TO_bset_contrT* bset_contr,cuda_bset_contrT* bset_gptr,int* ijterms,int sym_nrepres,int*sym_degen)
{
size_t memory_used = 0;
printf("Copying bset_contr for J=%i to gpu........",bset_contr->jval);
//construct a gpu_bset_contr
cuda_bset_contrT to_gpu_bset;
printf("copy easy part\n");
//Copy easy stuff
to_gpu_bset.jval = bset_contr->jval;
to_gpu_bset.Maxsymcoeffs = bset_contr->Maxsymcoeffs;
to_gpu_bset.max_deg_size = bset_contr->max_deg_size;
to_gpu_bset.Maxcontracts = bset_contr->Maxcontracts;
to_gpu_bset.Nclasses = bset_contr->Nclasses;
printf("copy icontr\n");
//GPU pointer to icontr2icase///////////////////////////////////////
int* icontr_gptr;
//Malloc in the gpu
if(hipSuccess != hipMalloc(&icontr_gptr,sizeof(int)*bset_contr->Maxcontracts*2))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for icontr2icase for J=%i\n",to_gpu_bset.jval);
exit(0);
}
memory_used += sizeof(int)*bset_contr->Maxcontracts*2;
//give the pointer to the cuda object
to_gpu_bset.icontr2icase = icontr_gptr;
//Copy over
if(hipSuccess != hipMemcpy(icontr_gptr,bset_contr->icontr2icase,sizeof(int)*bset_contr->Maxcontracts*2,hipMemcpyHostToDevice))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't copy icontr2icase to gpu for J=%i\n",to_gpu_bset.jval);
}
////////////////////////////////////////////////////////////////////////
printf("copy iroot\n");
////////////////////////////////Same for iroot_correlat_j0///////////////////////////////////////////////////
int* iroot_corr_gptr;
//Malloc in the gpu
if(hipSuccess != hipMalloc (&iroot_corr_gptr , sizeof(int)*bset_contr->Maxcontracts ) )
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for iroot_correlat_j0 for J=%i\n",to_gpu_bset.jval);
exit(0);
}
//give the pointer to the cuda object
to_gpu_bset.iroot_correlat_j0 = iroot_corr_gptr;
memory_used += sizeof(int)*bset_contr->Maxcontracts; //Add memory used
//Copy over
if(hipSuccess != hipMemcpy(iroot_corr_gptr,bset_contr->iroot_correlat_j0,sizeof(int)*bset_contr->Maxcontracts,hipMemcpyHostToDevice))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't copy iroot_correlat_j0 to gpu for J=%i\n",to_gpu_bset.jval);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////// K ////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
printf("copy K\n");
int* k_gptr;
//Malloc in the gpu
if(hipSuccess != hipMalloc(&k_gptr,sizeof(int)*bset_contr->Maxcontracts))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for K for J=%i\n",to_gpu_bset.jval);
exit(0);
}
//give the pointer to the cuda object
to_gpu_bset.k = k_gptr;
memory_used += sizeof(int)*bset_contr->Maxcontracts;
//Copy over
if(hipSuccess != hipMemcpy(k_gptr,bset_contr->k,sizeof(int)*bset_contr->Maxcontracts,hipMemcpyHostToDevice))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't copy k to gpu for J=%i\n",to_gpu_bset.jval);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////// KTau ////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
printf("copy Ktau\n");
int* kt_gptr;
//Malloc in the gpu
if(hipSuccess != hipMalloc(&kt_gptr,sizeof(int)*bset_contr->Maxcontracts))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for Ktau for J=%i\n",to_gpu_bset.jval);
exit(0);
}
//give the pointer to the cuda object
to_gpu_bset.ktau = kt_gptr;
memory_used += sizeof(int)*bset_contr->Maxcontracts;
//Copy over
if(hipSuccess != hipMemcpy(kt_gptr,bset_contr->ktau,sizeof(int)*bset_contr->Maxcontracts,hipMemcpyHostToDevice))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't copy ktau to gpu for J=%i\n",to_gpu_bset.jval);
exit(0);
}
///////////////////////////////////////////////N///////////////////////////////////////////////////////////////////
printf("copy N\n");
int* N_gptr;
if(hipSuccess != hipMalloc(&N_gptr,sizeof(int)*sym_nrepres*bset_contr->Maxsymcoeffs))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for N for J=%i\n",to_gpu_bset.jval);
}
memory_used += sizeof(int)*sym_nrepres*bset_contr->Maxsymcoeffs;
to_gpu_bset.N = N_gptr;
printf("Malloc\n");
int* Ncopy = (int*)malloc(sizeof(int)*sym_nrepres*bset_contr->Maxsymcoeffs);
printf("Make copy\n");
for(int i = 0; i < sym_nrepres; i++){
for(int j = 0; j < bset_contr->Maxsymcoeffs; j++)
{
Ncopy[ i + (j*sym_nrepres)] = bset_contr->irr[i].N[j];
//printf("N[%i,%i] = %i %i\n",i,j,Ncopy[ i + (j*sym_nrepres)],bset_contr->irr[i].N[j]);
}
}
printf("Copy\n");
hipMemcpy(N_gptr,Ncopy,sizeof(int)*sym_nrepres*bset_contr->Maxsymcoeffs,hipMemcpyHostToDevice);
to_gpu_bset.N = N_gptr;
free(Ncopy);
////////////////////////////////////////////////////////////////////////////////////////
printf("copy Ntotal\n");
//////////////////////////////N total////////////////////////////////////////////////////////
int* Ntot_gptr;
copy_array_to_gpu((void*)bset_contr->Ntotal,(void**)&Ntot_gptr,sizeof(int)*sym_nrepres,"Ntotal");
to_gpu_bset.Ntotal = Ntot_gptr;
///////////////////////////////////////////
printf("copy irr_repres\n");
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////// irre_repres ////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
double** irr_gptr;
if(hipSuccess != hipMalloc(&irr_gptr,sizeof(double*)*sym_nrepres))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for irreducible representation for J=%i\n",to_gpu_bset.jval);
exit(0);
}
memory_used += sizeof(double*)*sym_nrepres;
to_gpu_bset.irr_repres = irr_gptr;
//Hold pointers to doubles
double** d_ptr = (double**)malloc(sizeof(double*)*sym_nrepres);
for(int i =0; i < sym_nrepres; i++)
{
if(hipSuccess != hipMalloc(&d_ptr[i],sizeof(double)*bset_contr->Ntotal[i]*sym_degen[i]*bset_contr->mat_size))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for irreducible representation for J=%i\n",to_gpu_bset.jval);
exit(0);
}
memory_used += sizeof(double)*bset_contr->Ntotal[i]*sym_degen[i]*bset_contr->mat_size;
//copy repres to irr_repres
hipMemcpy(d_ptr[i],bset_contr->irr[i].repres,sizeof(double)*bset_contr->Ntotal[i]*sym_degen[i]*bset_contr->mat_size,hipMemcpyHostToDevice);
}
//copy pointerlist to irr_gptr;
hipMemcpy(irr_gptr,d_ptr,sizeof(double*)*sym_nrepres,hipMemcpyHostToDevice);
free(d_ptr); //clear memory and pointer
d_ptr = 0;
printf("copy ijterms size = %i\n",bset_contr->Maxsymcoeffs*sym_nrepres);
//Copy ijterms
copy_array_to_gpu((void*)ijterms,(void**)&(to_gpu_bset.ijterms),sizeof(int)*bset_contr->Maxsymcoeffs*sym_nrepres,"ijterms");
memory_used += sizeof(int)*bset_contr->Maxsymcoeffs*sym_nrepres;
printf("copy final bset\n");
/////////////////////////////////copy object over////////////////////////////////
hipMemcpy(bset_gptr,&to_gpu_bset,sizeof(cuda_bset_contrT),hipMemcpyHostToDevice);
printf(".....done!\n");
return memory_used;
};
__host__ size_t create_and_copy_bset_contr_to_gpu(TO_bset_contrT* bset_contr,cuda_bset_contrT** bset_gptr,int* ijterms,int sym_nrepres,int*sym_degen)
{
if(hipSuccess != hipMalloc(bset_gptr,sizeof(cuda_bset_contrT) ) )
{
fprintf(stderr,"[create_and_copy_bset_contr_to_gpu]: Couldn't allocate memory for bset\n");
exit(0);
}
return copy_bset_contr_to_gpu( bset_contr,*bset_gptr,ijterms,sym_nrepres,sym_degen);
}
//Copy threej
__host__ void copy_threej_to_gpu(double* threej,double** threej_gptr, int jmax)
{
copy_array_to_gpu((void*)threej,(void**) threej_gptr, (jmax+1)*(jmax+1)*3*3*sizeof(double),"three_j");
};
///////////Dipole stuff now
__host__ void dipole_initialise(FintensityJob* intensity){
printf("Begin Input\n");
read_fields(intensity);
printf("End Input\n");
//Wake up the gpu//
printf("Wake up gpu\n");
hipFree(0);
printf("....Done!\n");
int jmax = max(intensity->jvals[0],intensity->jvals[1]);
bset_contr_factory(&(intensity->bset_contr[0]),0,intensity->molec.sym_degen,intensity->molec.sym_nrepres);
bset_contr_factory(&(intensity->bset_contr[1]),intensity->jvals[0],intensity->molec.sym_degen,intensity->molec.sym_nrepres);
bset_contr_factory(&(intensity->bset_contr[2]),intensity->jvals[1],intensity->molec.sym_degen,intensity->molec.sym_nrepres);
//Correlate them
correlate_index(intensity->bset_contr[0],intensity->bset_contr[0]);
correlate_index(intensity->bset_contr[0],intensity->bset_contr[1]);
correlate_index(intensity->bset_contr[0],intensity->bset_contr[2]);
printf("Reading dipole\n");
//Read the dipole
read_dipole(intensity->bset_contr[0],&(intensity->dipole_me),intensity->dip_size);
printf("Computing threej\n");
//Compute threej
precompute_threej(&(intensity->threej),jmax);
//ijterms
printf("Computing ijerms\n");
compute_ijterms((intensity->bset_contr[1]),&(intensity->bset_contr[1].ijterms),intensity->molec.sym_nrepres);
compute_ijterms((intensity->bset_contr[2]),&(intensity->bset_contr[2].ijterms),intensity->molec.sym_nrepres);
//Read eigenvalues
read_eigenvalues((*intensity));
unsigned int dimenmax = 0;
unsigned int nsizemax = 0;
intensity->dimenmax = 0;
intensity->nsizemax = 0;
//Find nsize
for(int i =0; i < intensity->molec.sym_nrepres; i++){
if(intensity->isym_do[i]){
nsizemax= max(intensity->bset_contr[1].nsize[i],nsizemax);
nsizemax = max(intensity->bset_contr[2].nsize[i],nsizemax);
}
}
printf("Biggest vector dimensiton is %u \n",nsizemax);
for(int i = 0; i < 3; i++){
printf("dimenmax = %u J=%i Maxcontracts =%i\n",dimenmax,intensity->bset_contr[i].jval,intensity->bset_contr[i].Maxcontracts);
dimenmax = max(intensity->bset_contr[i].Maxcontracts,dimenmax);
}
//intensity->dimenmax = max(intensity->bset_contr[2].Maxcontracts,intensity->dimenmax);
printf("Biggest max contraction is is %u \n",dimenmax);
intensity->dimenmax = dimenmax;
intensity->nsizemax = nsizemax;
printf("Find igamma pairs\n");
find_igamma_pair((*intensity));
printf("done!\n");
	//Begin GPU related initialisation////////////////////////////////////////////////////////
intensity_info int_gpu;
//Copy over constants to GPU
int_gpu.sym_nrepres = intensity->molec.sym_nrepres;
int_gpu.jmax = jmax+1;
int_gpu.dip_stride_1 = intensity->bset_contr[0].Maxcontracts;
int_gpu.dip_stride_2 = intensity->bset_contr[0].Maxcontracts*intensity->bset_contr[0].Maxcontracts;
int_gpu.dimenmax = intensity->dimenmax;
int_gpu.sq2 = 1.0/sqrt(2.0);
copy_array_to_gpu((void*)intensity->molec.sym_degen,(void**)&int_gpu.sym_degen,sizeof(int)*intensity->molec.sym_nrepres,"sym_degen");
CheckCudaError("Pre-initial");
printf("Copy intensity information\n");
copy_intensity_info(&int_gpu);
printf("done\n");
CheckCudaError("Post-initial");
printf("Copying bset_contrs to GPU...\n");
intensity->g_ptrs.bset_contr = new cuda_bset_contrT*[2];
create_and_copy_bset_contr_to_gpu(&intensity->bset_contr[1],&(intensity->g_ptrs.bset_contr[0]),intensity->bset_contr[1].ijterms,intensity->molec.sym_nrepres,intensity->molec.sym_degen);
create_and_copy_bset_contr_to_gpu(&intensity->bset_contr[2],&(intensity->g_ptrs.bset_contr[1]),intensity->bset_contr[2].ijterms,intensity->molec.sym_nrepres,intensity->molec.sym_degen);
printf("Done\n");
printf("Copying threej...\n");
copy_threej_to_gpu(intensity->threej,&(intensity->g_ptrs.threej), jmax);
printf("done\n");
printf("Copying dipole\n");
copy_array_to_gpu((void*)intensity->dipole_me,(void**)&(intensity->g_ptrs.dipole_me),intensity->dip_size,"dipole_me");
printf("Done..");
//exit(0);
//Will improve
intensity->gpu_memory = 1l*1024l*1024l*1024l;
intensity->cpu_memory = 1l*1024l*1024l*1024l;
};
__host__ void dipole_do_intensities(FintensityJob & intensity){
	//Get the available cpu memory
unsigned long available_cpu_memory = intensity.cpu_memory;
unsigned long available_gpu_memory = intensity.gpu_memory;
//Compute how many inital state vectors and final state vectors
unsigned long no_final_states_cpu = ((available_cpu_memory)/8l - long(2*intensity.dimenmax))/(3l*intensity.dimenmax);//(Initial + vec_cor + half_ls)*dimen_max
unsigned long no_final_states_gpu = ((available_gpu_memory)/8l - long(2*intensity.dimenmax))/(3l*intensity.dimenmax);//(Initial + vec_cor + half_ls)*dimen_max
printf("No of final states in gpu_memory: %d cpu memory: %d\n",no_final_states_gpu,no_final_states_cpu);
//The intial state vector
double* initial_vec = new double[intensity.dimenmax];
double* gpu_initial_vec=NULL;
copy_array_to_gpu((void*)initial_vec,(void**)&(gpu_initial_vec),sizeof(double)*intensity.dimenmax,"gpu_initial_vec");
printf("%p\n",gpu_initial_vec);
double* final_vec = new double[intensity.dimenmax];
double* gpu_final_vec=NULL;
copy_array_to_gpu((void*)final_vec,(void**)&(gpu_final_vec),sizeof(double)*intensity.dimenmax,"gpu_final_vec");
double* corr_vec = new double[intensity.dimenmax];
double* gpu_corr_vec=NULL;
copy_array_to_gpu((void*)corr_vec,(void**)&(gpu_corr_vec),sizeof(double)*intensity.dimenmax,"gpu_corr_vec");
double* half_ls = new double[intensity.dimenmax];
double** gpu_half_ls=new double*[2];
copy_array_to_gpu((void*)half_ls,(void**)&(gpu_half_ls[0]),sizeof(double)*intensity.dimenmax,"gpu_half_ls1");
copy_array_to_gpu((void*)half_ls,(void**)&(gpu_half_ls[1]),sizeof(double)*intensity.dimenmax,"gpu_half_ls2");
double line_str =0.0;
char filename[1024];
//Get the filename
printf("Open vector unit\n");
FILE** eigenvec_unit = new FILE*[2*intensity.molec.sym_nrepres];
for(int i =0; i< 2; i++){
for(int j = 0; j < intensity.molec.sym_nrepres; j++)
{
sprintf(filename,j0eigen_vector_gamma_filebase,intensity.jvals[i],j+1);
printf("Reading %s\n",filename);
eigenvec_unit[i + j*2] = fopen(filename,"r");
if(eigenvec_unit[i + j*2] == NULL)
{
printf("error opening %s \n",filename);
exit(0);
}
}
}
	//Opened all units, now let's start computing
//Initialise cublas
hipblasHandle_t handle;
hipblasStatus_t stat;
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return;
}
CheckCudaError("Initialisation");
// Number of threads in each thread block
int blockSize =256;
// Number of thread blocks in grid
int gridSize = (int)ceil((float)intensity.dimenmax/blockSize);
printf("Nu_if\tJf Kf quantaF\t <-- \tJI KI tauI quantaI\t Ein_A\tLine_str\n");
//Run
for(int ilevelI = 0; ilevelI < intensity.Neigenlevels; ilevelI++){
// ! start measuring time per line
// !
int indI = intensity.eigen[ilevelI].jind;
// !
// !dimension of the bases for the initial states
// !
int dimenI = intensity.bset_contr[indI+1].Maxcontracts;
// !
	// !energy, quanta, and degeneracy order of the initial state
// !
int jI = intensity.eigen[ilevelI].jval;
double energyI = intensity.eigen[ilevelI].energy;
int igammaI = intensity.eigen[ilevelI].igamma;
int * quantaI = intensity.eigen[ilevelI].quanta;
int * normalI = intensity.eigen[ilevelI].normal;
int ndegI = intensity.eigen[ilevelI].ndeg;
int nsizeI = intensity.bset_contr[indI+1].nsize[igammaI];
FILE* unitI = eigenvec_unit[ indI + (igammaI)*2];
// printf("Ilevel = %i\n",ilevelI);
if(!energy_filter_lower(intensity,jI,energyI,quantaI)) continue;
fseek(unitI,(intensity.eigen[ilevelI].irec[0]-1)*nsizeI*sizeof(double),SEEK_SET);
//Read vector from file
// printf("Read vector\n");
int tread = fread(initial_vec,sizeof(double),nsizeI,unitI);
//for(int i=0; i< nsizeI; i++){
// printf("vec[%i]=%16.8e\n",i,initial_vec[i]);}
//printf("read = %i\n",tread);
//Transfer it to the GPU
// printf("Transfer vector\n");
stat = hipblasSetVector(intensity.dimenmax, sizeof(double),initial_vec, 1, gpu_initial_vec, 1);
CheckCudaError("Set Vector I");
hipDeviceSynchronize();
// printf("Correlating vectors\n");
//for(int ideg = 0; ideg < ndegI; ideg++){
//host_correlate_vectors(&intensity.bset_contr[indI+1],0,igammaI,intensity.bset_contr[indI+1].ijterms,intensity.molec.sym_degen,initial_vec,corr_vec);
hipLaunchKernelGGL(( device_correlate_vectors), dim3(gridSize),dim3(blockSize), 0, 0, intensity.g_ptrs.bset_contr[indI],0,igammaI, gpu_initial_vec,gpu_corr_vec);
CheckCudaError("device correlate I");
hipDeviceSynchronize();
//
//printf("Done\n");
printf("J= %i energy = %11.4f\n",jI,energyI);
printf("----------------------------------\n");
for(int indF=0; indF <2; indF++){
hipLaunchKernelGGL(( device_compute_1st_half_ls), dim3(gridSize),dim3(blockSize), 0, 0, intensity.g_ptrs.bset_contr[indI],intensity.g_ptrs.bset_contr[indF],intensity.g_ptrs.dipole_me,igammaI,gpu_corr_vec,intensity.g_ptrs.threej,gpu_half_ls[indF]);
//CheckCudaError("compute half ls I");
//hipDeviceSynchronize();
//hipblasGetVector(dimenI, sizeof(double),gpu_half_ls[indF], 1, half_ls, 1);
//for(int i=0; i< dimenI; i++){
// printf("half_ls[%i]=%16.8e\n",i,half_ls[i]);}
//printf("----------------------------------\n");
}
//Final states
for(int ilevelF = 0; ilevelF < intensity.Neigenlevels; ilevelF++){
// ! start measuring time per line
// !
int indF = intensity.eigen[ilevelF].jind;
// !
//printf("indF=%i",indF);
// !dimension of the bases for the initial states
// !
int dimenF = intensity.bset_contr[indF+1].Maxcontracts;
// !
		// !energy, quanta, and degeneracy order of the final state
// !
int jF = intensity.eigen[ilevelF].jval;
double energyF = intensity.eigen[ilevelF].energy;
int igammaF = intensity.eigen[ilevelF].igamma;
int * quantaF = intensity.eigen[ilevelF].quanta;
int * normalF = intensity.eigen[ilevelF].normal;
int ndegF = intensity.eigen[ilevelF].ndeg;
int nsizeF = intensity.bset_contr[indF+1].nsize[igammaF];
FILE* unitF = eigenvec_unit[ indF + (igammaF)*2];
if(!energy_filter_upper(intensity,jF,energyF,quantaF)) continue;
for(int i = 0; i < intensity.dimenmax; i++){
final_vec[i]=0.0;
}
fseek(unitF,(intensity.eigen[ilevelF].irec[0]-1)*nsizeF*sizeof(double),SEEK_SET);
//Read vector from file
fread(final_vec,sizeof(double),nsizeF,unitF);
//for(int i=0; i< dimenF; i++){
// printf("ivec[%i]=%16.8e\n",i,final_vec[i]);}
if(!intensity_filter(intensity,jI,jF,energyI,energyF,igammaI,igammaF,quantaI,quantaF)) continue;
//device_clear_vector<<<gridSize,blockSize>>>(gpu_final_vec);
//Transfer it to the GPU
stat = hipblasSetVector(intensity.dimenmax, sizeof(double),final_vec, 1, gpu_final_vec, 1);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS SetVector F failed\n");
printf ("Error code: %s\n",_cudaGetErrorEnum(stat));
return;
}
double nu_if = energyF - energyI;
//for(int ideg = 0; ideg < ndegF; ideg++){
hipLaunchKernelGGL(( device_correlate_vectors), dim3(gridSize),dim3(blockSize), 0, 0, intensity.g_ptrs.bset_contr[indF],0,igammaF, gpu_final_vec,gpu_corr_vec);
CheckCudaError("correlate final vector");
hipDeviceSynchronize();
//hipblasGetVector(dimenF, sizeof(double),gpu_corr_vec, 1, corr_vec, 1);
//for(int i=0; i< dimenF; i++){
// printf("ivec[%i]=%16.8e\n",i,corr_vec[i]);}
//}
//
hipDeviceSynchronize();
//Compute ls
// for(int i = 0; i < dimenF; i++)
// printf("%11.4e\n",corr_vec[i]);
// //exit(0);
line_str = 0;
//hipblasDdot (handle,intensity.dimenmax,gpu_half_ls[indF], 1,gpu_corr_vec, 1,&line_str);
hipblasDdot (handle, intensity.dimenmax, gpu_corr_vec, 1, gpu_half_ls[indF], 1, &line_str);
//hipblasDdot (handle, intensity.dimenmax, gpu_half_ls[indF], 1, gpu_half_ls[indF], 1, &line_str);
double orig_ls = line_str;
			//Print intensities
line_str *= line_str;
//printf("line_str %11.4e\n",line_str);
			double A_einst = A_coef_s_1*double((2*jI)+1)*line_str*pow(fabs(nu_if),3);
line_str = line_str * intensity.gns[igammaI] * double( (2*jI + 1)*(2 * jF + 1) );
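			// Sketch of what gets reported: the ddot above gives the transition moment for
			// this component, line_str is its square, A_einst scales it by (2*jI+1)*|nu_if|^3
			// and the A_coef_s_1 prefactor, and the printed line strength additionally carries
			// the nuclear statistical weight gns and the (2jI+1)(2jF+1) degeneracy factors.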
//if(line_str < 0.00000000001) continue;
/*
write(out, "( (i4, 1x, a4, 3x),'<-', (i4, 1x, a4, 3x),a1,&
&(2x, f11.4,1x),'<-',(1x, f11.4,1x),f11.4,2x,&
&'(',1x,a3,x,i3,1x,')',1x,'(',1x,<nclasses>(x,a3),1x,<nmodes>(1x, i3),1x,')',1x,'<- ', &
&'(',1x,a3,x,i3,1x,')',1x,'(',1x,<nclasses>(x,a3),1x,<nmodes>(1x, i3),1x,')',1x, &
& 3(1x, es16.8),2x,(1x,i6,1x),'<-',(1x,i6,1x),i8,1x,i8,&
1x,'(',1x,<nmodes>(1x, i3),1x,')',1x,'<- ',1x,'(',1x,<nmodes>(1x, i3),1x,')',1x,&
<nformat>(1x, es16.8))") &
!
jF,sym%label(igammaF),jI,sym%label(igammaI),branch, &
energyF-intensity%ZPE,energyI-intensity%ZPE,nu_if, &
eigen(ilevelF)%cgamma(0),eigen(ilevelF)%krot,&
eigen(ilevelF)%cgamma(1:nclasses),eigen(ilevelF)%quanta(1:nmodes), &
eigen(ilevelI)%cgamma(0),eigen(ilevelI)%krot,&
eigen(ilevelI)%cgamma(1:nclasses),eigen(ilevelI)%quanta(1:nmodes), &
linestr,A_einst,absorption_int,&
eigen(ilevelF)%ilevel,eigen(ilevelI)%ilevel,&
itransit,istored(ilevelF),normalF(1:nmodes),normalI(1:nmodes),&
linestr_deg(1:ndegI,1:ndegF)
endif
*/
printf("%11.4f\t(%i %i ) ( ",nu_if,jF,intensity.eigen[ilevelF].krot);
for(int i = 0; i < intensity.molec.nmodes+1; i++)
printf("%i ",quantaF[i]);
printf(")\t <-- \t(%i %i ) ",jI,intensity.eigen[ilevelI].krot);
for(int i = 0; i < intensity.molec.nmodes+1; i++)
printf("%i ",quantaI[i]);
printf("\t %16.8e %16.8e %16.8e\n",A_einst,line_str,orig_ls);
//exit(0);
}
}
}
__host__ void do_1st_half_ls(cuda_bset_contrT* bset_contrI,cuda_bset_contrT* bset_contrF,int dimenMax,int idegI,int igammaI,double* dipole_me,double* vecI,double* vec,double* threej,double* half_ls,hipStream_t stream = 0){
int blockSize = 512;
	int gridSize = (int)ceil((float)dimenMax/blockSize);
hipLaunchKernelGGL(( device_correlate_vectors), dim3(gridSize),dim3(blockSize),0,stream, bset_contrI,idegI,igammaI, vecI,vec);
blockSize = 64;
int numSMs;
hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, 0);
hipLaunchKernelGGL(( device_compute_1st_half_ls_flipped_dipole), dim3(numSMs*40),dim3(blockSize),0,stream, bset_contrI,bset_contrF,
dipole_me,vec,threej,
half_ls);
}
__host__ void do_1st_half_ls_blocks(cuda_bset_contrT* bset_contrI,cuda_bset_contrT* bset_contrF,int dimenMax,int idegI,int igammaI,double* gpu_dipole,FDipole_ptrs & dipole_me,double* vecI,double* vec,double* threej,double* half_ls,hipStream_t stream = 0){
int blockSize = 512;
int gridSize = (int)ceil((float)dimenMax/blockSize);
hipLaunchKernelGGL(( device_correlate_vectors), dim3(gridSize),dim3(blockSize),0,stream, bset_contrI,idegI,igammaI, vecI,vec);
CheckCudaError("correlate");
blockSize = 64;
gridSize = (int)ceil((float)dimenMax/blockSize);
int parts = dipole_me.parts;
// printf("parts = %i\n",parts);
for(int i = 0; i < parts; i++){
// printf("i=%i\n startF = %i endF = %i ncontr = %i",i, dipole_me.dip_block[i].startF, dipole_me.dip_block[i].endF ,dipole_me.dip_block[i].ncontrF );
hipLaunchKernelGGL(( device_compute_1st_half_ls_flipped_dipole_blocks), dim3(gridSize),dim3(blockSize),0,stream, bset_contrI,bset_contrF,
dipole_me.dip_block[i].startF,dipole_me.dip_block[i].endF,dipole_me.dip_block[i].ncontrF,gpu_dipole,vec,threej,
half_ls); //Compute half ls
//Transfer next block
CheckCudaError("half_ls");
if((i+1) >= parts){
//printf("memcopy %i/%i\n",i+1,parts);
//printf("size=%zu\n gpu_dipole = %p dipole_me = %p \n",dipole_me.dip_block[0].size,gpu_dipole,dipole_me.dip_block[0].dipole_me);
hipMemcpyAsync(gpu_dipole,dipole_me.dip_block[0].dipole_me,dipole_me.dip_block[0].size,hipMemcpyHostToDevice,stream) ;
}else{
// printf("memcopy %i\n",i);
// printf("size=%zu\n gpu_dipole = %p dipole_me = %p \n",dipole_me.dip_block[i+1].size,gpu_dipole,dipole_me.dip_block[i+1].dipole_me);
hipMemcpyAsync(gpu_dipole,dipole_me.dip_block[i+1].dipole_me,dipole_me.dip_block[i+1].size,hipMemcpyHostToDevice,stream) ;
}
CheckCudaError("Memcpy");
//hipDeviceSynchronize();
}
//exit(0);
//exit(0);
}
__host__ void do_1st_half_ls_branch(cuda_bset_contrT* bset_contrI,cuda_bset_contrT* bset_contrF,int dimenMax,int idegI,int igammaI,double* dipole_me,double* vecI,double* vec,double* threej,double* half_ls,hipStream_t stream = 0){
int blockSize = 512;
	int gridSize = (int)ceil((float)dimenMax/blockSize);
hipLaunchKernelGGL(( device_correlate_vectors), dim3(gridSize),dim3(blockSize),0,stream, bset_contrI,idegI,igammaI, vecI,vec);
blockSize = 256;
int numSMs;
hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, 0);
hipLaunchKernelGGL(( device_compute_1st_half_ls_flipped_dipole_branch), dim3(numSMs/2),dim3(blockSize),0,stream, bset_contrI,bset_contrF,
dipole_me,vec,threej,
half_ls);
}
/////////////////////////////--------------------Multi-threaded versions--------------------///////////////////////////////////////////////
__host__ void dipole_initialise_cpu(FintensityJob* intensity){
printf("Begin Input\n");
read_fields(intensity);
printf("End Input\n");
int jmax = max(intensity->jvals[0],intensity->jvals[1]);
	printf("Sym_nrepres = %i\n",intensity->molec.sym_nrepres);
	//Now create the bset_contrs
bset_contr_factory(&(intensity->bset_contr[0]),0,intensity->molec.sym_degen,intensity->molec.sym_nrepres);
intensity->molec.nclasses = intensity->bset_contr[0].Nclasses;
bset_contr_factory(&(intensity->bset_contr[1]),intensity->jvals[0],intensity->molec.sym_degen,intensity->molec.sym_nrepres);
bset_contr_factory(&(intensity->bset_contr[2]),intensity->jvals[1],intensity->molec.sym_degen,intensity->molec.sym_nrepres);
//Correlate them
correlate_index(intensity->bset_contr[0],intensity->bset_contr[0]);
correlate_index(intensity->bset_contr[0],intensity->bset_contr[1]);
correlate_index(intensity->bset_contr[0],intensity->bset_contr[2]);
printf("Check dipole size\n");
size_t dipole_size = GetFilenameSize("j0_extfield.chk");
if(dipole_size > max_dipole_size){
printf("Splitting dipole\n");
read_dipole_flipped_blocks(intensity->bset_contr[0],intensity->dipole_blocks,2);
intensity->split_dipole = true;
}else{
//Read the dipole
read_dipole_flipped(intensity->bset_contr[0],&(intensity->dipole_me),intensity->dip_size);
intensity->split_dipole = false;
}
printf("Computing threej\n");
//Compute threej
precompute_threej(&(intensity->threej),jmax);
//ijterms
printf("Computing ijerms\n");
compute_ijterms((intensity->bset_contr[1]),&(intensity->bset_contr[1].ijterms),intensity->molec.sym_nrepres);
compute_ijterms((intensity->bset_contr[2]),&(intensity->bset_contr[2].ijterms),intensity->molec.sym_nrepres);
//Read eigenvalues
read_eigenvalues((*intensity));
intensity->dimenmax = 0;
intensity->nsizemax = 0;
//Find nsize
for(int i =0; i < intensity->molec.sym_nrepres; i++){
if(intensity->isym_do[i]){
intensity->nsizemax= max(intensity->bset_contr[1].nsize[i],intensity->nsizemax);
intensity->nsizemax = max(intensity->bset_contr[2].nsize[i],intensity->nsizemax);
}
}
printf("Biggest vector dimensiton is %i \n",intensity->nsizemax);
intensity->dimenmax = max(intensity->bset_contr[1].Maxcontracts,intensity->dimenmax);
intensity->dimenmax = max(intensity->bset_contr[2].Maxcontracts,intensity->dimenmax);
printf("Biggest max contraction is is %i \n",intensity->dimenmax);
printf("Find igamma pairs\n");
find_igamma_pair((*intensity));
printf("done!\n");
};
__host__ void dipole_initialise_gpu(FintensityJob * intensity, FGPU_ptrs & g_ptrs,int device_id){
int jmax = max(intensity->jvals[0],intensity->jvals[1]);
//Get available memory
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, device_id);
g_ptrs.avail_mem = size_t(double(devProp.totalGlobalMem)*0.95);
printf("Available gpu memory = %2.4f GB",float(g_ptrs.avail_mem)/(1024.0f*1024.0f*1024.0f));
printf("Total global memory: %zu\n", devProp.totalGlobalMem);
	//Begin GPU related initialisation////////////////////////////////////////////////////////
intensity_info int_gpu;
//Copy over constants to GPU
int_gpu.sym_nrepres = intensity->molec.sym_nrepres;
int_gpu.jmax = jmax+1;
int_gpu.dip_stride_1 = intensity->bset_contr[0].Maxcontracts;
int_gpu.dip_stride_2 = intensity->bset_contr[0].Maxcontracts*intensity->bset_contr[0].Maxcontracts;
int_gpu.dimenmax = intensity->dimenmax;
int_gpu.sq2 = 1.0/sqrt(2.0);
printf("Sym max_degen = %i\n",intensity->molec.sym_maxdegen);
copy_array_to_gpu((void*)intensity->molec.sym_degen,(void**)&int_gpu.sym_degen,sizeof(int)*intensity->molec.sym_nrepres,"sym_degen");
g_ptrs.avail_mem -= sizeof(int)*intensity->molec.sym_nrepres;
CheckCudaError("Pre-initial");
printf("Copy intensity information...");
copy_intensity_info(&int_gpu);
printf("done...");
CheckCudaError("Post-initial");
printf("Copying bset_contrs to GPU...");
g_ptrs.bset_contr = new cuda_bset_contrT*[2];
g_ptrs.avail_mem -= create_and_copy_bset_contr_to_gpu(&intensity->bset_contr[1],&(g_ptrs.bset_contr[0]),intensity->bset_contr[1].ijterms,intensity->molec.sym_nrepres,intensity->molec.sym_degen);
g_ptrs.avail_mem -= create_and_copy_bset_contr_to_gpu(&intensity->bset_contr[2],&(g_ptrs.bset_contr[1]),intensity->bset_contr[2].ijterms,intensity->molec.sym_nrepres,intensity->molec.sym_degen);
printf("Done..");
printf("Copying threej...");
copy_threej_to_gpu(intensity->threej,&(g_ptrs.threej), jmax);
g_ptrs.avail_mem -=(jmax+1)*(jmax+1)*3*3*sizeof(double);
printf("done..");
/*
if(intensity->dip_size > g_ptrs.avail_mem)
{
printf("Dipole too large to fit into gpu memory, leaving on host gpu_avail = %zu dipole_size = %zu\n",g_ptrs.avail_mem,intensity->dip_size);
if(omp_get_thread_num()==0) intensity->host_dipole=true;
}else{
printf("Copying dipole...");
copy_array_to_gpu((void*)intensity->dipole_me,(void**)&(g_ptrs.dipole_me),intensity->dip_size,"dipole_me");
g_ptrs.avail_mem -=intensity->dip_size;
intensity->host_dipole=false;
}
#pragma omp barrier
if(intensity->host_dipole && omp_get_thread_num()==0){
printf("Copying dipole\n");
//double* replacement_dipole;
printf("Allocing memory....");
if(hipSuccess != hipHostRegister(intensity->dipole_me,intensity->dip_size,hipHostMallocPortable | hipHostMallocMapped | hipHostMallocWriteCombined)) printf("Could not malloc!!!\n");
CheckCudaError("Dipole!");
printf("copying....");
//memcpy(replacement_dipole,intensity->dipole_me,intensity->dip_size);
//copy_dipole_host(intensity->dipole_me,&replacement_dipole,intensity->dip_size);
printf("Done");
//Clear dipole from memory
//delete[] intensity->dipole_me;
//Put new dipole
//intensity->dipole_me = replacement_dipole;
}
*/
intensity->host_dipole=false;
if(intensity->split_dipole){
size_t malloc_size = intensity->dipole_blocks.dip_block[0].size;
//for(int i = 0; i < intensity->dipole_blocks.parts; i++){
// malloc_size = max((unsigned long long)malloc_size,(unsigned long long)intensity->dipole_blocks.dip_block[i].size);
//}
printf("maqlloc size is %zu\n",malloc_size);
copy_array_to_gpu((void*)intensity->dipole_blocks.dip_block[0].dipole_me,(void**)&(g_ptrs.dipole_me),malloc_size,"dipole_me_block");
g_ptrs.avail_mem -=malloc_size ;
}else{
copy_array_to_gpu((void*)intensity->dipole_me,(void**)&(g_ptrs.dipole_me),intensity->dip_size,"dipole_me");
g_ptrs.avail_mem -=intensity->dip_size;
intensity->host_dipole=false;
}
#pragma omp barrier
printf("Left over memory is %zu bytes\n",g_ptrs.avail_mem);
printf("Done\n");
}
__host__ void dipole_do_intensities_async_omp(FintensityJob & intensity,int device_id,int num_devices){
hipDeviceReset(); // clears all the runtime state for the current thread
hipSetDevice(device_id); //Set the device name
//Wake up the gpu//
//printf("Wake up gpu\n");
hipFree(0);
//printf("....Done!\n");
int current_stream = 0;
int nJ = 2;
//Setup the gpu pointers
FGPU_ptrs g_ptrs;
dipole_initialise_gpu(&intensity,g_ptrs,device_id); // Initialise the gpu pointers
	//Get the available cpu memory
//unsigned long available_cpu_memory = intensity.cpu_memory;
size_t available_gpu_memory = g_ptrs.avail_mem;
//Compute how many inital state vectors and final state vectors
//unsigned long no_final_states_cpu = ((available_cpu_memory)/8l - long(2*intensity.dimenmax))/(3l*intensity.dimenmax);//(Initial + vec_cor + half_ls)*dimen_max
size_t no_final_states_gpu = available_gpu_memory/sizeof(double);
no_final_states_gpu -= intensity.nsizemax + intensity.dimenmax*nJ*intensity.molec.sym_maxdegen;
no_final_states_gpu /= ( intensity.nsizemax + intensity.dimenmax );
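	// Memory budget sketch: from the free doubles we reserve one initial vector (nsizemax)
	// plus nJ*sym_maxdegen half-linestrength vectors (dimenmax each); every batched final
	// state then costs one nsizemax eigenvector plus one dimenmax correlated vector.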
printf("We can fit %zu states in the GPU memory\n",no_final_states_gpu);
//no_final_states_gpu /=2;
//no_final_states_gpu = 10;
printf("%zu\n",no_final_states_gpu);
no_final_states_gpu = min((unsigned int )intensity.Neigenlevels,(unsigned int )no_final_states_gpu);
printf("%d\n",no_final_states_gpu);
//Create Stream variables/////////
hipStream_t st_ddot_vectors[16];
hipEvent_t st_vec_done[16];
hipStream_t f_memcpy;
//hipEvent_t half_ls_done = new hipStream_t
//Half linestrength related variable
hipStream_t* st_half_ls = new hipStream_t[nJ*intensity.molec.sym_maxdegen]; //Concurrently run half_ls computations on this many of the half_ls's
double* gpu_half_ls;
//Create initial vector holding point
double* initial_vector = new double[intensity.nsizemax];
double* gpu_initial_vector;
//Final vectors
//Streams for each final vector computation
double* final_vectors;
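	// Pinned, write-combined host memory: cheap for the host to fill and fast to stream
	// to the device with async copies, but slow for the host to read back.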
hipHostMalloc(&final_vectors,sizeof(double)*intensity.nsizemax*no_final_states_gpu, hipHostMallocWriteCombined);
//= new double[intensity.dimenmax*no_final_states_gpu]; //Pin this memory in final build
//int* vec_ilevelF = new int[no_final_states_gpu];
double* gpu_corr_vectors;
double* gpu_final_vectors;
double* check_vector = new double[intensity.dimenmax];
int** vec_ilevel_buff = new int*[2];
vec_ilevel_buff[0] = new int[no_final_states_gpu];
vec_ilevel_buff[1] = new int[no_final_states_gpu];
double* line_str; //= new double[no_final_states_gpu*intensity.molec.sym_maxdegen*intensity.molec.sym_maxdegen];
hipHostMalloc(&line_str,sizeof(double)*no_final_states_gpu*intensity.molec.sym_maxdegen*intensity.molec.sym_maxdegen);
//double* gpu_line_str;
//Track which vectors we are using
int vector_idx=0;
int vector_count=0;
int ilevel_total=0;
int ilevelF=0,start_ilevelF=0;
printf("Finished host side allocation\n");
printf("Copying initial vectors\n");
//Copy them to the gpu
copy_array_to_gpu((void*)initial_vector,(void**)&(gpu_initial_vector),sizeof(double)*intensity.nsizemax,"gpu_initial_vector");
available_gpu_memory -= sizeof(double)*intensity.nsizemax;
printf("Copying final vectors\n");
copy_array_to_gpu((void*)final_vectors,(void**)&(gpu_final_vectors),sizeof(double)*intensity.nsizemax*no_final_states_gpu,"gpu_final_vectors");
available_gpu_memory -= sizeof(double)*intensity.nsizemax*no_final_states_gpu;
printf("Create correlation vectors\n");
hipMalloc((void**)&(gpu_corr_vectors),sizeof(double)*intensity.dimenmax*no_final_states_gpu);
CheckCudaError("Init correlation");
available_gpu_memory -= sizeof(double)*intensity.dimenmax*no_final_states_gpu;
printf("Create Half ls vector\n");
hipMalloc((void**)&(gpu_half_ls),sizeof(double)*intensity.dimenmax*nJ*intensity.molec.sym_maxdegen);
available_gpu_memory -= sizeof(double)*intensity.dimenmax*nJ*intensity.molec.sym_maxdegen;
CheckCudaError("Init half ls");
//copy_array_to_gpu((void*)line_str,(void**)&(gpu_line_str),sizeof(double)*intensity.dimenmax*nJ*intensity.molec.sym_maxdegen,"gpu_line_str");
//A HACK to host the dipole in CPU memory, will slow stuff down considerably
/*if(intensity.host_dipole){
printf("Device pointer fun!!!");
if(hipSuccess != hipHostGetDevicePointer((void **)&g_ptrs.dipole_me, (void *)intensity.dipole_me, 0)){
printf("Device pointer is not fun :(!!");
}
printf("\n\n GPU-> Host pointer: %p\n",g_ptrs.dipole_me);
}*/
printf("Finished gpu copying\n");
//
//Open the eigenvector units
char filename[1024];
//Get the filename
printf("Open vector units\n");
FILE** eigenvec_unit = new FILE*[2*intensity.molec.sym_nrepres];
for(int i =0; i< 2; i++){
for(int j = 0; j < intensity.molec.sym_nrepres; j++)
{
if(intensity.isym_do[j] == false) continue;
sprintf(filename,j0eigen_vector_gamma_filebase,intensity.jvals[i],j+1);
printf("Reading %s\n",filename);
eigenvec_unit[i + j*2] = fopen(filename,"r");
if(eigenvec_unit[i + j*2] == NULL)
{
printf("error opening %s \n",filename);
exit(0);
}
}
}
//Initialise cublas
hipblasHandle_t handle;
hipblasStatus_t stat;
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return;
}
//Create the streams
//Intial state
for(int i = 0; i < intensity.molec.sym_maxdegen; i++)
for(int j=0; j < nJ; j++)
hipStreamCreate(&st_half_ls[j + i*nJ]);
//Final states
hipStreamCreate(&f_memcpy);
for(int i = 0; i < 16; i++){
hipStreamCreate(&st_ddot_vectors[i]);
hipEventCreate(&st_vec_done[i],hipEventDisableTiming);
}
int last_ilevelF= 0;
//////Begin the computation//////////////////////////////////////////////////////////////////////////////
CheckCudaError("Initialisation");
//If thread zero then it'll progress normally, otherwise with 4 devices it will go like this
//Thread 0 = 0 4 8 12
//Thread 1 = 1 5 9 13
//Thread 2 = 2 6 10 14
//Thread 3 = 3 7 11 15
//Run
#pragma omp barrier
if(omp_get_thread_num()==0){
printf("Linestrength S(f<-i) [Debye**2], Transition moments [Debye], Einstein coefficient A(if) [1/s], and Intensities [cm/mol]\n\n\n");
}
#pragma omp barrier
//constants
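//beta = h*c/(k_B*T); with planck, vellgt and boltz in CGS units this is in cm, so
//multiplying by term energies in cm^-1 gives the dimensionless Boltzmann exponent.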
double beta = planck * vellgt / (boltz * intensity.temperature);
double boltz_fc=0.0;
double absorption_int = 0.0;
for(int ilevelI = omp_get_thread_num(); ilevelI < intensity.Neigenlevels; ilevelI+=num_devices){
//printf("new I level!\n");
//Get the basic info we need
// printf("ilevelI = %i\n",ilevelI);
int indI = intensity.eigen[ilevelI].jind;
int dimenI = intensity.bset_contr[indI+1].Maxcontracts;
int jI = intensity.eigen[ilevelI].jval;
double energyI = intensity.eigen[ilevelI].energy;
int igammaI = intensity.eigen[ilevelI].igamma;
int * quantaI = intensity.eigen[ilevelI].quanta;
int * normalI = intensity.eigen[ilevelI].normal;
int ndegI = intensity.eigen[ilevelI].ndeg;
// printf("ilevelI=%i jI=%i energyI=%11.4f igammaI=%i ndegI=%i\n",ilevelI,jI,energyI,igammaI,ndegI);
int nsizeI = intensity.bset_contr[indI+1].nsize[igammaI];
FILE* unitI = eigenvec_unit[ indI + (igammaI)*2];
//Check filters
if(!energy_filter_lower(intensity,jI,energyI,quantaI)) continue;
//If success then read
fseek(unitI,(intensity.eigen[ilevelI].irec[0]-1)*nsizeI*sizeof(double),SEEK_SET);
fread(initial_vector,sizeof(double),nsizeI,unitI);
stat = hipblasSetVector(nsizeI, sizeof(double),initial_vector, 1, gpu_initial_vector, 1);
int idegF_t = 0;
int igammaF_t = intensity.igamma_pair[igammaI];
CheckCudaError("Set Vector I");
//Do first half ls
for(int indF =0; indF < nJ; indF++){
int jF= intensity.jvals[indF];
if(!indF_filter(intensity,jI,jF,energyI,igammaI,quantaI))continue;
for(int ideg=0; ideg < ndegI; ideg++){
if(!degeneracy_filter(intensity,igammaI,igammaF_t,ideg,idegF_t)) continue;
if(intensity.split_dipole == false){
do_1st_half_ls(g_ptrs.bset_contr[indI],g_ptrs.bset_contr[indF],
intensity.dimenmax,ideg,igammaI,g_ptrs.dipole_me,gpu_initial_vector,gpu_corr_vectors + intensity.dimenmax*ideg,
g_ptrs.threej,
gpu_half_ls + indF*intensity.dimenmax + ideg*intensity.dimenmax*nJ
,st_half_ls[indF]);
}else{
do_1st_half_ls_blocks(g_ptrs.bset_contr[indI],g_ptrs.bset_contr[indF],
intensity.dimenmax,ideg,igammaI,g_ptrs.dipole_me,intensity.dipole_blocks,gpu_initial_vector,gpu_corr_vectors + intensity.dimenmax*ideg,
g_ptrs.threej,
gpu_half_ls + indF*intensity.dimenmax + ideg*intensity.dimenmax*nJ
,st_half_ls[0]);
}
}
//wait for the next batch
}
vector_idx=0;
ilevelF=0;
int current_buff = 0;
//While the half_ls is being computed, let's load up some final state vectors
while(vector_idx < no_final_states_gpu && ilevelF < intensity.Neigenlevels)
{
// !
int indF = intensity.eigen[ilevelF].jind;
// !
//printf("indF=%i",indF);
// !dimension of the bases for the initial states
// !
// !
//!energy, quanta, and degeneracy order of the final state
// !
int jF = intensity.eigen[ilevelF].jval;
double energyF = intensity.eigen[ilevelF].energy;
int igammaF = intensity.eigen[ilevelF].igamma;
int * quantaF = intensity.eigen[ilevelF].quanta;
int * normalF = intensity.eigen[ilevelF].normal;
int nsizeF = intensity.bset_contr[indF+1].nsize[igammaF];
int irec = intensity.eigen[ilevelF].irec[0]-1;
FILE* unitF = eigenvec_unit[ indF + (igammaF)*2];
ilevelF++;
if(!energy_filter_upper(intensity,jF,energyF,quantaF)) {continue;}
if(!intensity_filter(intensity,jI,jF,energyI,energyF,igammaI,igammaF,quantaI,quantaF)) continue;
// store the level
vec_ilevel_buff[0][vector_idx] = ilevelF-1;
//printf("ilevelF=%i\n",vec_ilevel_buff[0][vector_idx]);
//Otherwise load the vector to a free slot
fseek(unitF,irec*nsizeF*sizeof(double),SEEK_SET);
fread(final_vectors + vector_idx*intensity.nsizemax,sizeof(double),nsizeF,unitF);
//Increment
vector_idx++;
}
vector_count = vector_idx;
//printf("memcopy");
//Memcopy it in one go
//hipDeviceSynchronize();
hipMemcpyAsync(gpu_final_vectors,final_vectors,sizeof(double)*intensity.nsizemax*vector_count,hipMemcpyHostToDevice,f_memcpy) ;
hipDeviceSynchronize(); //Wait till we're set up
CheckCudaError("Batch final vectors");
//printf("vector_count = %i\n",vector_count);
while(vector_count != 0)
{
int stream_count = 0;
for(int i = 0; i < vector_count; i++){
ilevelF = vec_ilevel_buff[int(current_buff)][i];
//printf("ilevelF=%i\n",ilevelF);
int indF = intensity.eigen[ilevelF].jind;
int jF = intensity.eigen[ilevelF].jval;
double energyF = intensity.eigen[ilevelF].energy;
int igammaF = intensity.eigen[ilevelF].igamma;
int * quantaF = intensity.eigen[ilevelF].quanta;
int * normalF = intensity.eigen[ilevelF].normal;
int nsizeF = intensity.bset_contr[indF+1].nsize[igammaF];
//int irec = intensity.eigen[ilevelF].irec[0]-1;
int dimenF = intensity.bset_contr[indF+1].Maxcontracts;
int ndegF = intensity.eigen[ilevelF].ndeg;
int blockSize =512;
int gridSize = (int)ceil((float)intensity.dimenmax/blockSize);
//for(int i = 0; i < ndeg
//Correlate the vectors
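//line_str is indexed as [i + idegI*no_final_states_gpu + idegF*no_final_states_gpu*sym_maxdegen],
//i.e. one entry per (final vector, initial degeneracy, final degeneracy) triple; the entries
//are zeroed here and then filled by the cublasDdot calls below.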
for(int idegF = 0; idegF < ndegF; idegF++){
//gridSize = (int)ceil((float)dimenF/blockSize);
for(int idegI=0; idegI < ndegI; idegI++)
line_str[i + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen] = 0.0;
if(intensity.reduced && idegF!=0) continue;
for(int idegI=0; idegI < ndegI; idegI++){
//line_str[i + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen]=0.0;
//line_str[i + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen] = 0.0;
if(!degeneracy_filter(intensity, igammaI,igammaF,idegI,idegF)) continue;
hipLaunchKernelGGL(( device_correlate_vectors), dim3(gridSize),dim3(blockSize),0,st_ddot_vectors[stream_count], g_ptrs.bset_contr[indF],idegF,igammaF, (gpu_final_vectors + i*intensity.nsizemax),gpu_corr_vectors + intensity.dimenmax*i);
hipblasSetStream(handle,st_ddot_vectors[stream_count]);
hipblasDdot (handle, dimenF,gpu_corr_vectors + intensity.dimenmax*i, 1, gpu_half_ls + indF*intensity.dimenmax + idegI*intensity.dimenmax*nJ, 1,
&line_str[i + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen]);
}
}
stream_count++;
if(stream_count >=16) stream_count=0;
}
//Record the events for synchronization
for(int i = 0; i < 16; i++){
hipEventRecord(st_vec_done[i],st_ddot_vectors[i]);
hipStreamWaitEvent ( f_memcpy,st_vec_done[i],0); //Make this stream wait for the event
}
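//Every ddot stream has now recorded its completion event and f_memcpy has been told to wait
//on all of them, so the asynchronous copy of the next vector batch issued on f_memcpy below
//cannot overwrite gpu_final_vectors while dot products are still in flight.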
current_buff = 1-current_buff;
vector_idx = 0;
ilevelF++;
//While the line strength is being computed, let's load up some final state vectors
while(vector_idx < no_final_states_gpu && ilevelF < intensity.Neigenlevels)
{
// !
int indF = intensity.eigen[ilevelF].jind;
// !
//printf("indF=%i",indF);
// !dimension of the bases for the initial states
// !
// !
//!energy, quanta, and degeneracy order of the final state
// !
int jF = intensity.eigen[ilevelF].jval;
double energyF = intensity.eigen[ilevelF].energy;
int igammaF = intensity.eigen[ilevelF].igamma;
int * quantaF = intensity.eigen[ilevelF].quanta;
int * normalF = intensity.eigen[ilevelF].normal;
int nsizeF = intensity.bset_contr[indF+1].nsize[igammaF];
int irec = intensity.eigen[ilevelF].irec[0]-1;
FILE* unitF = eigenvec_unit[ indF + (igammaF)*2];
ilevelF++;
if(!energy_filter_upper(intensity,jF,energyF,quantaF)) {continue;}
if(!intensity_filter(intensity,jI,jF,energyI,energyF,igammaI,igammaF,quantaI,quantaF)) continue;
// store the level
vec_ilevel_buff[current_buff][vector_idx] = ilevelF-1;
//load the vector to a free slot
fseek(unitF,irec*nsizeF*sizeof(double),SEEK_SET);
fread(final_vectors + vector_idx*intensity.nsizemax,sizeof(double),nsizeF,unitF);
//hipMemcpyAsync(gpu_final_vectors,final_vectors,sizeof(double)*intensity.dimenmax*vector_count,hipMemcpyHostToDevice,st_ddot_vectors[vector_idx]) ;
//Increment
vector_idx++;
}
last_ilevelF=ilevelF;
hipMemcpyAsync(gpu_final_vectors,final_vectors,sizeof(double)*intensity.nsizemax*vector_count,hipMemcpyHostToDevice,f_memcpy) ;
//We're done, now let's output
for(int i = 0; i < 16; i++)
hipEventSynchronize(st_vec_done[i]); //wait for all events to be completed
for(int ivec = 0; ivec < vector_count; ivec++)
{
ilevelF = vec_ilevel_buff[1-current_buff][ivec];
//printf("ilevelF=%i\n",ilevelF);
int indF = intensity.eigen[ilevelF].jind;
int jF = intensity.eigen[ilevelF].jval;
double energyF = intensity.eigen[ilevelF].energy;
int igammaF = intensity.eigen[ilevelF].igamma;
int * quantaF = intensity.eigen[ilevelF].quanta;
int * normalF = intensity.eigen[ilevelF].normal;
int ndegF = intensity.eigen[ilevelF].ndeg;
//hipStreamSynchronize(st_ddot_vectors[ivec]);
double ls=0.0;
double linestr=0.0;
for(int idegF=0; idegF < ndegF; idegF++){
for(int idegI=0; idegI < ndegI; idegI++){
linestr=line_str[ivec + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen];
ls +=(linestr*linestr); //line_str[ivec + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen]*line_str[ivec + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen];
}
}
ls /= double(ndegI);
if (intensity.reduced && ndegF!=1 && ndegI != 1) ls *= double(ndegI);
double final_ls = ls;
double nu_if = energyF - energyI;
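//boltz_fc = |nu_if| * exp(-(E_I - ZPE)*h*c/kT) * (1 - exp(-|nu_if|*h*c/kT)) / q_stat, i.e. the
//lower-state Boltzmann population times the stimulated-emission correction over the partition
//function; A_einst is the Einstein A coefficient [1/s] and absorption_int the absorption
//intensity [cm/mol], built from the A_coef_s_1 and intens_cm_mol prefactors.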
boltz_fc = abs(nu_if) * exp(-(energyI-intensity.ZPE) * beta) * (1.0 - exp(-abs(nu_if) * beta))/ intensity.q_stat;
//Print intensities
//printf("line_str %11.4e\n",line_str);
double A_einst = A_coef_s_1*double((2*jI)+1)*final_ls*pow(abs(nu_if),3);
final_ls = final_ls * intensity.gns[igammaI] * double( (2*jI + 1)*(2 * jF + 1) );
absorption_int = final_ls * intens_cm_mol * boltz_fc;
//if(final_ls < intensity.thresh_linestrength) continue;
/*
printf("%11.4f\t(%i %i ) ( ",nu_if,jF,intensity.eigen[ilevelF].krot);
for(int i = 0; i < intensity.molec.nmodes+1; i++)
printf("%i ",quantaF[i]);
printf(")\t <-- \t(%i %i ) ",jI,intensity.eigen[ilevelI].krot);
for(int i = 0; i < intensity.molec.nmodes+1; i++)
printf("%i ",quantaI[i]);
printf("\t %16.8e %16.8e %16.8e\n",A_einst,final_ls,orig_ls);
*/
/* write(out, "( (i4, 1x, a4, 3x),'<-', (i4, 1x, a4, 3x),a1,&
&(2x, f11.4,1x),'<-',(1x, f11.4,1x),f11.4,2x,&
&'(',1x,a3,x,i3,1x,')',1x,'(',1x,<nclasses>(x,a3),1x,<nmodes>(1x, i3),1x,')',1x,'<- ', &
&'(',1x,a3,x,i3,1x,')',1x,'(',1x,<nclasses>(x,a3),1x,<nmodes>(1x, i3),1x,')',1x, &
& 3(1x, es16.8),2x,(1x,i6,1x),'<-',(1x,i6,1x),i8,1x,i8,&
1x,'(',1x,<nmodes>(1x, i3),1x,')',1x,'<- ',1x,'(',1x,<nmodes>(1x, i3),1x,')',1x,&
<nformat>(1x, es16.8))") &
!
jF,sym%label(igammaF),jI,sym%label(igammaI),branch, &
energyF-intensity%ZPE,energyI-intensity%ZPE,nu_if, &
eigen(ilevelF)%cgamma(0),eigen(ilevelF)%krot,&
eigen(ilevelF)%cgamma(1:nclasses),eigen(ilevelF)%quanta(1:nmodes), &
eigen(ilevelI)%cgamma(0),eigen(ilevelI)%krot,&
eigen(ilevelI)%cgamma(1:nclasses),eigen(ilevelI)%quanta(1:nmodes), &
linestr,A_einst,absorption_int,&
eigen(ilevelF)%ilevel,eigen(ilevelI)%ilevel,&
itransit,istored(ilevelF),normalF(1:nmodes),normalI(1:nmodes),&
linestr_deg(1:ndegI,1:ndegF)
*/
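//The printf block below mirrors the TROVE Fortran write format quoted above, one transition per line.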
#pragma omp critical(output_ls)
{
printf("%4i %4s <-%4i %4s %1s %11.4f <- %11.4f %11.4f ( %3s %3i ) ( ",jF,intensity.molec.c_sym[igammaF],jI,intensity.molec.c_sym[igammaI],branch(jF,jI),energyF-intensity.ZPE,energyI-intensity.ZPE,abs(nu_if),intensity.eigen[ilevelF].cgamma[0],intensity.eigen[ilevelF].krot);
for(int i = 1; i <= intensity.molec.nclasses; i++)
printf(" %3s",intensity.eigen[ilevelF].cgamma[i]);
printf(" ");
for(int i = 1; i <= intensity.molec.nmodes; i++)
printf(" %3i",quantaF[i]);
printf(" ) <- ( %3s %3i ) ( ",intensity.eigen[ilevelI].cgamma[0],intensity.eigen[ilevelI].krot);
for(int i = 1; i <= intensity.molec.nclasses; i++)
printf(" %3s",intensity.eigen[ilevelI].cgamma[i]);
printf(" ");
for(int i = 1; i <= intensity.molec.nmodes; i++)
printf(" %3i",quantaI[i]);
printf(") %16.8e %16.8e %16.8e %6i <- %6i %8i %8i ( ",final_ls,A_einst,absorption_int,intensity.eigen[ilevelF].ilevel+1,intensity.eigen[ilevelI].ilevel+1,0,0);
for(int i = 1; i <= intensity.molec.nmodes; i++)
printf(" %3i",normalF[i]);
printf(" ) <- ( ");
for(int i = 1; i <= intensity.molec.nmodes; i++)
printf(" %3i",normalI[i]);
printf(" ) ");
//printf(" ) %16.9e\n",1.23456789);
for(int idegF=0; idegF < ndegF; idegF++){
for(int idegI=0; idegI < ndegI; idegI++){
printf(" %16.9e",line_str[ivec + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen]);
}
}
printf("\n");
}
}
//return;
ilevelF=last_ilevelF+1;
//Save the new vector_count
vector_count = vector_idx;
CheckCudaError("Compute final vectors");
hipDeviceSynchronize();
}
}
// printf("Thread =%i done",device_id);
for(int i=0; i< 2; i++){
for(int j = 0; j < intensity.molec.sym_nrepres; j++)
{
if(!intensity.isym_do[j]) continue;
if(eigenvec_unit[i + j*2]!=NULL)
fclose(eigenvec_unit[i + j*2]);
}
}
hipDeviceReset();
hipHostFree(final_vectors);
hipHostFree(line_str);
}
|
a5f8995832f47bbbec09aa67bd54f5fc9a5fd232.cu
|
#include "trove_objects.h"
#include "cuda_objects.cuh"
#include "cuda_host.cuh"
#include "cublas_v2.h"
#include "Util.h"
#include <cstdio>
#include <cstdlib>
#include <omp.h>
const size_t max_dipole_size = 5l*1024l*1024l*1024l;
double pi = 4.0 * atan2(1.0,1.0);
double A_coef_s_1 = 64.0*pow(10.0,-36.0) * pow(pi,4.0) / (3.0 * 6.62606896*pow(10.0,-27.0));
double planck = 6.62606896*pow(10.0,-27.0);
double avogno = 6.0221415*pow(10.0,23.0);
double vellgt = 2.99792458*pow(10.0,10.0);
double intens_cm_mol = 8.0*pow(10.0,-36.0) * pow(pi,3.0)*avogno/(3.0*planck*vellgt);
double boltz = 1.380658*pow(10.0,-16.0);
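//Physical constants in CGS units (erg, cm, s, K); A_coef_s_1 and intens_cm_mol are the
//prefactors used later for Einstein A coefficients [1/s] and absorption intensities [cm/mol].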
//beta = planck * vellgt / (boltz * intensity%temperature)
void CheckCudaError(const char* tag){
// check for error
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("[%s] CUDA error: %s\n", tag,cudaGetErrorString(error));
exit(-1);
}
}
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error)
{
switch (error)
{
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
// Print device properties
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %zu\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %zu\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %zu\n", devProp.totalConstMem);
printf("Texture alignment: %zu\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
void get_cuda_info(FintensityJob & intensity){
int devCount;
cudaGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printDevProp(devProp);
} // Iterate through devices
}
int count_free_devices(){
int devCount;
int free_devices=0;
cudaGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
cudaSetDevice(i);
if(cudaFree(0)==cudaSuccess){
free_devices++;
cudaThreadExit();
}
} // Iterate through devices
return free_devices;
}
int get_free_device(int last){
int device_id=-1;
last++;
int devCount;
cudaGetDeviceCount(&devCount);
for(int i=last; i< devCount; i++){
cudaSetDevice(i);
if(cudaFree(0)==cudaSuccess){
cudaThreadExit();
return i;
}
}
return -1;
}
__host__ void copy_dipole_host(double* dipole_me,double** dipole_me_host,size_t & dip_size)
{
printf("Alloc");
cudaMallocHost(dipole_me_host,dip_size); //Malloc to pinned memory
printf("memcpy");
memcpy(*dipole_me_host,dipole_me,dip_size);
}
__host__ void copy_array_to_gpu(void* arr,void** arr_gpu,size_t arr_size,const char* arr_name)
{
//Malloc dipole
if(cudaSuccess != cudaMalloc(arr_gpu,arr_size))
{
fprintf(stderr,"[copy_array_to_gpu]: couldn't malloc for %s \n",arr_name);
CheckCudaError(arr_name);
exit(0);
}
if(cudaSuccess != cudaMemcpy((*arr_gpu),arr,arr_size,cudaMemcpyHostToDevice))
{
fprintf(stderr,"[copy_array_to_gpu]: error copying %s \n",arr_name);
exit(0);
}
};
//Copies relevant information needed to do intensity calculations onto the gpu
//Arguments p1: The bset_contr to copy p2: A device memory pointer to copy to
//Returns how much memory was used in bytes
__host__ size_t copy_bset_contr_to_gpu(TO_bset_contrT* bset_contr,cuda_bset_contrT* bset_gptr,int* ijterms,int sym_nrepres,int*sym_degen)
{
size_t memory_used = 0;
printf("Copying bset_contr for J=%i to gpu........",bset_contr->jval);
//construct a gpu_bset_contr
cuda_bset_contrT to_gpu_bset;
printf("copy easy part\n");
//Copy easy stuff
to_gpu_bset.jval = bset_contr->jval;
to_gpu_bset.Maxsymcoeffs = bset_contr->Maxsymcoeffs;
to_gpu_bset.max_deg_size = bset_contr->max_deg_size;
to_gpu_bset.Maxcontracts = bset_contr->Maxcontracts;
to_gpu_bset.Nclasses = bset_contr->Nclasses;
printf("copy icontr\n");
//GPU pointer to icontr2icase///////////////////////////////////////
int* icontr_gptr;
//Malloc in the gpu
if(cudaSuccess != cudaMalloc(&icontr_gptr,sizeof(int)*bset_contr->Maxcontracts*2))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for icontr2icase for J=%i\n",to_gpu_bset.jval);
exit(0);
}
memory_used += sizeof(int)*bset_contr->Maxcontracts*2;
//give the pointer to the cuda object
to_gpu_bset.icontr2icase = icontr_gptr;
//Copy over
if(cudaSuccess != cudaMemcpy(icontr_gptr,bset_contr->icontr2icase,sizeof(int)*bset_contr->Maxcontracts*2,cudaMemcpyHostToDevice))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't copy icontr2icase to gpu for J=%i\n",to_gpu_bset.jval);
}
////////////////////////////////////////////////////////////////////////
printf("copy iroot\n");
////////////////////////////////Same for iroot_correlat_j0///////////////////////////////////////////////////
int* iroot_corr_gptr;
//Malloc in the gpu
if(cudaSuccess != cudaMalloc (&iroot_corr_gptr , sizeof(int)*bset_contr->Maxcontracts ) )
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for iroot_correlat_j0 for J=%i\n",to_gpu_bset.jval);
exit(0);
}
//give the pointer to the cuda object
to_gpu_bset.iroot_correlat_j0 = iroot_corr_gptr;
memory_used += sizeof(int)*bset_contr->Maxcontracts; //Add memory used
//Copy over
if(cudaSuccess != cudaMemcpy(iroot_corr_gptr,bset_contr->iroot_correlat_j0,sizeof(int)*bset_contr->Maxcontracts,cudaMemcpyHostToDevice))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't copy iroot_correlat_j0 to gpu for J=%i\n",to_gpu_bset.jval);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////// K ////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
printf("copy K\n");
int* k_gptr;
//Malloc in the gpu
if(cudaSuccess != cudaMalloc(&k_gptr,sizeof(int)*bset_contr->Maxcontracts))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for K for J=%i\n",to_gpu_bset.jval);
exit(0);
}
//give the pointer to the cuda object
to_gpu_bset.k = k_gptr;
memory_used += sizeof(int)*bset_contr->Maxcontracts;
//Copy over
if(cudaSuccess != cudaMemcpy(k_gptr,bset_contr->k,sizeof(int)*bset_contr->Maxcontracts,cudaMemcpyHostToDevice))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't copy k to gpu for J=%i\n",to_gpu_bset.jval);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////// KTau ////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
printf("copy Ktau\n");
int* kt_gptr;
//Malloc in the gpu
if(cudaSuccess != cudaMalloc(&kt_gptr,sizeof(int)*bset_contr->Maxcontracts))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for Ktau for J=%i\n",to_gpu_bset.jval);
exit(0);
}
//give the pointer to the cuda object
to_gpu_bset.ktau = kt_gptr;
memory_used += sizeof(int)*bset_contr->Maxcontracts;
//Copy over
if(cudaSuccess != cudaMemcpy(kt_gptr,bset_contr->ktau,sizeof(int)*bset_contr->Maxcontracts,cudaMemcpyHostToDevice))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't copy ktau to gpu for J=%i\n",to_gpu_bset.jval);
exit(0);
}
///////////////////////////////////////////////N///////////////////////////////////////////////////////////////////
printf("copy N\n");
int* N_gptr;
if(cudaSuccess != cudaMalloc(&N_gptr,sizeof(int)*sym_nrepres*bset_contr->Maxsymcoeffs))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for N for J=%i\n",to_gpu_bset.jval);
}
memory_used += sizeof(int)*sym_nrepres*bset_contr->Maxsymcoeffs;
to_gpu_bset.N = N_gptr;
printf("Malloc\n");
int* Ncopy = (int*)malloc(sizeof(int)*sym_nrepres*bset_contr->Maxsymcoeffs);
printf("Make copy\n");
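//Flatten bset_contr->irr[i].N[j] into a single array indexed as [i + j*sym_nrepres]
//so it can be shipped to the GPU with one cudaMemcpy.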
for(int i = 0; i < sym_nrepres; i++){
for(int j = 0; j < bset_contr->Maxsymcoeffs; j++)
{
Ncopy[ i + (j*sym_nrepres)] = bset_contr->irr[i].N[j];
//printf("N[%i,%i] = %i %i\n",i,j,Ncopy[ i + (j*sym_nrepres)],bset_contr->irr[i].N[j]);
}
}
printf("Copy\n");
cudaMemcpy(N_gptr,Ncopy,sizeof(int)*sym_nrepres*bset_contr->Maxsymcoeffs,cudaMemcpyHostToDevice);
to_gpu_bset.N = N_gptr;
free(Ncopy);
////////////////////////////////////////////////////////////////////////////////////////
printf("copy Ntotal\n");
//////////////////////////////N total////////////////////////////////////////////////////////
int* Ntot_gptr;
copy_array_to_gpu((void*)bset_contr->Ntotal,(void**)&Ntot_gptr,sizeof(int)*sym_nrepres,"Ntotal");
to_gpu_bset.Ntotal = Ntot_gptr;
///////////////////////////////////////////
printf("copy irr_repres\n");
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////// irre_repres ////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
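//irr_repres is stored as a device array of per-representation device pointers: each entry is
//cudaMalloc'd and filled with the Ntotal[i]*sym_degen[i]*mat_size coefficients for that
//representation, and the host-side pointer table (d_ptr) is then copied to the GPU.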
double** irr_gptr;
if(cudaSuccess != cudaMalloc(&irr_gptr,sizeof(double*)*sym_nrepres))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for irreducible representation for J=%i\n",to_gpu_bset.jval);
exit(0);
}
memory_used += sizeof(double*)*sym_nrepres;
to_gpu_bset.irr_repres = irr_gptr;
//Hold pointers to doubles
double** d_ptr = (double**)malloc(sizeof(double*)*sym_nrepres);
for(int i =0; i < sym_nrepres; i++)
{
if(cudaSuccess != cudaMalloc(&d_ptr[i],sizeof(double)*bset_contr->Ntotal[i]*sym_degen[i]*bset_contr->mat_size))
{
fprintf(stderr,"[copy_bset_contr_to_gpu]: Couldn't allocate memory for irreducible representation for J=%i\n",to_gpu_bset.jval);
exit(0);
}
memory_used += sizeof(double)*bset_contr->Ntotal[i]*sym_degen[i]*bset_contr->mat_size;
//copy repres to irr_repres
cudaMemcpy(d_ptr[i],bset_contr->irr[i].repres,sizeof(double)*bset_contr->Ntotal[i]*sym_degen[i]*bset_contr->mat_size,cudaMemcpyHostToDevice);
}
//copy pointerlist to irr_gptr;
cudaMemcpy(irr_gptr,d_ptr,sizeof(double*)*sym_nrepres,cudaMemcpyHostToDevice);
free(d_ptr); //clear memory and pointer
d_ptr = 0;
printf("copy ijterms size = %i\n",bset_contr->Maxsymcoeffs*sym_nrepres);
//Copy ijterms
copy_array_to_gpu((void*)ijterms,(void**)&(to_gpu_bset.ijterms),sizeof(int)*bset_contr->Maxsymcoeffs*sym_nrepres,"ijterms");
memory_used += sizeof(int)*bset_contr->Maxsymcoeffs*sym_nrepres;
printf("copy final bset\n");
/////////////////////////////////copy object over////////////////////////////////
cudaMemcpy(bset_gptr,&to_gpu_bset,sizeof(cuda_bset_contrT),cudaMemcpyHostToDevice);
printf(".....done!\n");
return memory_used;
};
__host__ size_t create_and_copy_bset_contr_to_gpu(TO_bset_contrT* bset_contr,cuda_bset_contrT** bset_gptr,int* ijterms,int sym_nrepres,int*sym_degen)
{
if(cudaSuccess != cudaMalloc(bset_gptr,sizeof(cuda_bset_contrT) ) )
{
fprintf(stderr,"[create_and_copy_bset_contr_to_gpu]: Couldn't allocate memory for bset\n");
exit(0);
}
return copy_bset_contr_to_gpu( bset_contr,*bset_gptr,ijterms,sym_nrepres,sym_degen);
}
//Copy threej
__host__ void copy_threej_to_gpu(double* threej,double** threej_gptr, int jmax)
{
copy_array_to_gpu((void*)threej,(void**) threej_gptr, (jmax+1)*(jmax+1)*3*3*sizeof(double),"three_j");
};
///////////Dipole stuff now
__host__ void dipole_initialise(FintensityJob* intensity){
printf("Begin Input\n");
read_fields(intensity);
printf("End Input\n");
//Wake up the gpu//
printf("Wake up gpu\n");
cudaFree(0);
printf("....Done!\n");
int jmax = max(intensity->jvals[0],intensity->jvals[1]);
bset_contr_factory(&(intensity->bset_contr[0]),0,intensity->molec.sym_degen,intensity->molec.sym_nrepres);
bset_contr_factory(&(intensity->bset_contr[1]),intensity->jvals[0],intensity->molec.sym_degen,intensity->molec.sym_nrepres);
bset_contr_factory(&(intensity->bset_contr[2]),intensity->jvals[1],intensity->molec.sym_degen,intensity->molec.sym_nrepres);
//Correlate them
correlate_index(intensity->bset_contr[0],intensity->bset_contr[0]);
correlate_index(intensity->bset_contr[0],intensity->bset_contr[1]);
correlate_index(intensity->bset_contr[0],intensity->bset_contr[2]);
printf("Reading dipole\n");
//Read the dipole
read_dipole(intensity->bset_contr[0],&(intensity->dipole_me),intensity->dip_size);
printf("Computing threej\n");
//Compute threej
precompute_threej(&(intensity->threej),jmax);
//ijterms
printf("Computing ijterms\n");
compute_ijterms((intensity->bset_contr[1]),&(intensity->bset_contr[1].ijterms),intensity->molec.sym_nrepres);
compute_ijterms((intensity->bset_contr[2]),&(intensity->bset_contr[2].ijterms),intensity->molec.sym_nrepres);
//Read eigenvalues
read_eigenvalues((*intensity));
unsigned int dimenmax = 0;
unsigned int nsizemax = 0;
intensity->dimenmax = 0;
intensity->nsizemax = 0;
//Find nsize
for(int i =0; i < intensity->molec.sym_nrepres; i++){
if(intensity->isym_do[i]){
nsizemax= max(intensity->bset_contr[1].nsize[i],nsizemax);
nsizemax = max(intensity->bset_contr[2].nsize[i],nsizemax);
}
}
printf("Biggest vector dimension is %u \n",nsizemax);
for(int i = 0; i < 3; i++){
printf("dimenmax = %u J=%i Maxcontracts =%i\n",dimenmax,intensity->bset_contr[i].jval,intensity->bset_contr[i].Maxcontracts);
dimenmax = max(intensity->bset_contr[i].Maxcontracts,dimenmax);
}
//intensity->dimenmax = max(intensity->bset_contr[2].Maxcontracts,intensity->dimenmax);
printf("Biggest max contraction is %u \n",dimenmax);
intensity->dimenmax = dimenmax;
intensity->nsizemax = nsizemax;
printf("Find igamma pairs\n");
find_igamma_pair((*intensity));
printf("done!\n");
//Begin GPU related initialisation////////////////////////////////////////////////////////
intensity_info int_gpu;
//Copy over constants to GPU
int_gpu.sym_nrepres = intensity->molec.sym_nrepres;
int_gpu.jmax = jmax+1;
int_gpu.dip_stride_1 = intensity->bset_contr[0].Maxcontracts;
int_gpu.dip_stride_2 = intensity->bset_contr[0].Maxcontracts*intensity->bset_contr[0].Maxcontracts;
int_gpu.dimenmax = intensity->dimenmax;
int_gpu.sq2 = 1.0/sqrt(2.0);
copy_array_to_gpu((void*)intensity->molec.sym_degen,(void**)&int_gpu.sym_degen,sizeof(int)*intensity->molec.sym_nrepres,"sym_degen");
CheckCudaError("Pre-initial");
printf("Copy intensity information\n");
copy_intensity_info(&int_gpu);
printf("done\n");
CheckCudaError("Post-initial");
printf("Copying bset_contrs to GPU...\n");
intensity->g_ptrs.bset_contr = new cuda_bset_contrT*[2];
create_and_copy_bset_contr_to_gpu(&intensity->bset_contr[1],&(intensity->g_ptrs.bset_contr[0]),intensity->bset_contr[1].ijterms,intensity->molec.sym_nrepres,intensity->molec.sym_degen);
create_and_copy_bset_contr_to_gpu(&intensity->bset_contr[2],&(intensity->g_ptrs.bset_contr[1]),intensity->bset_contr[2].ijterms,intensity->molec.sym_nrepres,intensity->molec.sym_degen);
printf("Done\n");
printf("Copying threej...\n");
copy_threej_to_gpu(intensity->threej,&(intensity->g_ptrs.threej), jmax);
printf("done\n");
printf("Copying dipole\n");
copy_array_to_gpu((void*)intensity->dipole_me,(void**)&(intensity->g_ptrs.dipole_me),intensity->dip_size,"dipole_me");
printf("Done..");
//exit(0);
//Will improve
intensity->gpu_memory = 1l*1024l*1024l*1024l;
intensity->cpu_memory = 1l*1024l*1024l*1024l;
};
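//Single-device, fully synchronous intensity loop: one initial and one final eigenvector are
//processed at a time. The batched, multi-stream version is dipole_do_intensities_async_omp below.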
__host__ void dipole_do_intensities(FintensityJob & intensity){
//Print available cpu memory
unsigned long available_cpu_memory = intensity.cpu_memory;
unsigned long available_gpu_memory = intensity.gpu_memory;
//Compute how many initial state vectors and final state vectors
unsigned long no_final_states_cpu = ((available_cpu_memory)/8l - long(2*intensity.dimenmax))/(3l*intensity.dimenmax);//(Initial + vec_cor + half_ls)*dimen_max
unsigned long no_final_states_gpu = ((available_gpu_memory)/8l - long(2*intensity.dimenmax))/(3l*intensity.dimenmax);//(Initial + vec_cor + half_ls)*dimen_max
printf("No of final states in gpu_memory: %lu cpu memory: %lu\n",no_final_states_gpu,no_final_states_cpu);
//The intial state vector
double* initial_vec = new double[intensity.dimenmax];
double* gpu_initial_vec=NULL;
copy_array_to_gpu((void*)initial_vec,(void**)&(gpu_initial_vec),sizeof(double)*intensity.dimenmax,"gpu_initial_vec");
printf("%p\n",gpu_initial_vec);
double* final_vec = new double[intensity.dimenmax];
double* gpu_final_vec=NULL;
copy_array_to_gpu((void*)final_vec,(void**)&(gpu_final_vec),sizeof(double)*intensity.dimenmax,"gpu_final_vec");
double* corr_vec = new double[intensity.dimenmax];
double* gpu_corr_vec=NULL;
copy_array_to_gpu((void*)corr_vec,(void**)&(gpu_corr_vec),sizeof(double)*intensity.dimenmax,"gpu_corr_vec");
double* half_ls = new double[intensity.dimenmax];
double** gpu_half_ls=new double*[2];
copy_array_to_gpu((void*)half_ls,(void**)&(gpu_half_ls[0]),sizeof(double)*intensity.dimenmax,"gpu_half_ls1");
copy_array_to_gpu((void*)half_ls,(void**)&(gpu_half_ls[1]),sizeof(double)*intensity.dimenmax,"gpu_half_ls2");
double line_str =0.0;
char filename[1024];
//Get the filename
printf("Open vector unit\n");
FILE** eigenvec_unit = new FILE*[2*intensity.molec.sym_nrepres];
for(int i =0; i< 2; i++){
for(int j = 0; j < intensity.molec.sym_nrepres; j++)
{
sprintf(filename,j0eigen_vector_gamma_filebase,intensity.jvals[i],j+1);
printf("Reading %s\n",filename);
eigenvec_unit[i + j*2] = fopen(filename,"r");
if(eigenvec_unit[i + j*2] == NULL)
{
printf("error opening %s \n",filename);
exit(0);
}
}
}
//Opened all units, now let's start computing
//Initialise cublas
cublasHandle_t handle;
cublasStatus_t stat;
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return;
}
CheckCudaError("Initialisation");
// Number of threads in each thread block
int blockSize =256;
// Number of thread blocks in grid
int gridSize = (int)ceil((float)intensity.dimenmax/blockSize);
printf("Nu_if\tJf Kf quantaF\t <-- \tJI KI tauI quantaI\t Ein_A\tLine_str\n");
//Run
for(int ilevelI = 0; ilevelI < intensity.Neigenlevels; ilevelI++){
// ! start measuring time per line
// !
int indI = intensity.eigen[ilevelI].jind;
// !
// !dimension of the bases for the initial states
// !
int dimenI = intensity.bset_contr[indI+1].Maxcontracts;
// !
// !energy, quanta, and degeneracy order of the initial state
// !
int jI = intensity.eigen[ilevelI].jval;
double energyI = intensity.eigen[ilevelI].energy;
int igammaI = intensity.eigen[ilevelI].igamma;
int * quantaI = intensity.eigen[ilevelI].quanta;
int * normalI = intensity.eigen[ilevelI].normal;
int ndegI = intensity.eigen[ilevelI].ndeg;
int nsizeI = intensity.bset_contr[indI+1].nsize[igammaI];
FILE* unitI = eigenvec_unit[ indI + (igammaI)*2];
// printf("Ilevel = %i\n",ilevelI);
if(!energy_filter_lower(intensity,jI,energyI,quantaI)) continue;
fseek(unitI,(intensity.eigen[ilevelI].irec[0]-1)*nsizeI*sizeof(double),SEEK_SET);
//Read vector from file
// printf("Read vector\n");
int tread = fread(initial_vec,sizeof(double),nsizeI,unitI);
//for(int i=0; i< nsizeI; i++){
// printf("vec[%i]=%16.8e\n",i,initial_vec[i]);}
//printf("read = %i\n",tread);
//Transfer it to the GPU
// printf("Transfer vector\n");
stat = cublasSetVector(intensity.dimenmax, sizeof(double),initial_vec, 1, gpu_initial_vec, 1);
CheckCudaError("Set Vector I");
cudaDeviceSynchronize();
// printf("Correlating vectors\n");
//for(int ideg = 0; ideg < ndegI; ideg++){
//host_correlate_vectors(&intensity.bset_contr[indI+1],0,igammaI,intensity.bset_contr[indI+1].ijterms,intensity.molec.sym_degen,initial_vec,corr_vec);
device_correlate_vectors<<<gridSize,blockSize>>>(intensity.g_ptrs.bset_contr[indI],0,igammaI, gpu_initial_vec,gpu_corr_vec);
CheckCudaError("device correlate I");
cudaDeviceSynchronize();
//
//printf("Done\n");
printf("J= %i energy = %11.4f\n",jI,energyI);
printf("----------------------------------\n");
for(int indF=0; indF <2; indF++){
device_compute_1st_half_ls<<<gridSize,blockSize>>>(intensity.g_ptrs.bset_contr[indI],intensity.g_ptrs.bset_contr[indF],intensity.g_ptrs.dipole_me,igammaI,gpu_corr_vec,intensity.g_ptrs.threej,gpu_half_ls[indF]);
//CheckCudaError("compute half ls I");
//cudaDeviceSynchronize();
//cublasGetVector(dimenI, sizeof(double),gpu_half_ls[indF], 1, half_ls, 1);
//for(int i=0; i< dimenI; i++){
// printf("half_ls[%i]=%16.8e\n",i,half_ls[i]);}
//printf("----------------------------------\n");
}
//Final states
for(int ilevelF = 0; ilevelF < intensity.Neigenlevels; ilevelF++){
// ! start measuring time per line
// !
int indF = intensity.eigen[ilevelF].jind;
// !
//printf("indF=%i",indF);
// !dimension of the bases for the initial states
// !
int dimenF = intensity.bset_contr[indF+1].Maxcontracts;
// !
// !energy, quanta, and degeneracy order of the final state
// !
int jF = intensity.eigen[ilevelF].jval;
double energyF = intensity.eigen[ilevelF].energy;
int igammaF = intensity.eigen[ilevelF].igamma;
int * quantaF = intensity.eigen[ilevelF].quanta;
int * normalF = intensity.eigen[ilevelF].normal;
int ndegF = intensity.eigen[ilevelF].ndeg;
int nsizeF = intensity.bset_contr[indF+1].nsize[igammaF];
FILE* unitF = eigenvec_unit[ indF + (igammaF)*2];
if(!energy_filter_upper(intensity,jF,energyF,quantaF)) continue;
for(int i = 0; i < intensity.dimenmax; i++){
final_vec[i]=0.0;
}
fseek(unitF,(intensity.eigen[ilevelF].irec[0]-1)*nsizeF*sizeof(double),SEEK_SET);
//Read vector from file
fread(final_vec,sizeof(double),nsizeF,unitF);
//for(int i=0; i< dimenF; i++){
// printf("ivec[%i]=%16.8e\n",i,final_vec[i]);}
if(!intensity_filter(intensity,jI,jF,energyI,energyF,igammaI,igammaF,quantaI,quantaF)) continue;
//device_clear_vector<<<gridSize,blockSize>>>(gpu_final_vec);
//Transfer it to the GPU
stat = cublasSetVector(intensity.dimenmax, sizeof(double),final_vec, 1, gpu_final_vec, 1);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS SetVector F failed\n");
printf ("Error code: %s\n",_cudaGetErrorEnum(stat));
return;
}
double nu_if = energyF - energyI;
//for(int ideg = 0; ideg < ndegF; ideg++){
device_correlate_vectors<<<gridSize,blockSize>>>(intensity.g_ptrs.bset_contr[indF],0,igammaF, gpu_final_vec,gpu_corr_vec);
CheckCudaError("correlate final vector");
cudaDeviceSynchronize();
//cublasGetVector(dimenF, sizeof(double),gpu_corr_vec, 1, corr_vec, 1);
//for(int i=0; i< dimenF; i++){
// printf("ivec[%i]=%16.8e\n",i,corr_vec[i]);}
//}
//
cudaDeviceSynchronize();
//Compute ls
// for(int i = 0; i < dimenF; i++)
// printf("%11.4e\n",corr_vec[i]);
// //exit(0);
line_str = 0;
//cublasDdot (handle,intensity.dimenmax,gpu_half_ls[indF], 1,gpu_corr_vec, 1,&line_str);
cublasDdot (handle, intensity.dimenmax, gpu_corr_vec, 1, gpu_half_ls[indF], 1, &line_str);
//cublasDdot (handle, intensity.dimenmax, gpu_half_ls[indF], 1, gpu_half_ls[indF], 1, &line_str);
double orig_ls = line_str;
//Print intensitys
line_str *= line_str;
//printf("line_str %11.4e\n",line_str);
double A_einst = A_coef_s_1*double((2*jI)+1)*line_str*pow(abs(nu_if),3);
line_str = line_str * intensity.gns[igammaI] * double( (2*jI + 1)*(2 * jF + 1) );
//if(line_str < 0.00000000001) continue;
/*
write(out, "( (i4, 1x, a4, 3x),'<-', (i4, 1x, a4, 3x),a1,&
&(2x, f11.4,1x),'<-',(1x, f11.4,1x),f11.4,2x,&
&'(',1x,a3,x,i3,1x,')',1x,'(',1x,<nclasses>(x,a3),1x,<nmodes>(1x, i3),1x,')',1x,'<- ', &
&'(',1x,a3,x,i3,1x,')',1x,'(',1x,<nclasses>(x,a3),1x,<nmodes>(1x, i3),1x,')',1x, &
& 3(1x, es16.8),2x,(1x,i6,1x),'<-',(1x,i6,1x),i8,1x,i8,&
1x,'(',1x,<nmodes>(1x, i3),1x,')',1x,'<- ',1x,'(',1x,<nmodes>(1x, i3),1x,')',1x,&
<nformat>(1x, es16.8))") &
!
jF,sym%label(igammaF),jI,sym%label(igammaI),branch, &
energyF-intensity%ZPE,energyI-intensity%ZPE,nu_if, &
eigen(ilevelF)%cgamma(0),eigen(ilevelF)%krot,&
eigen(ilevelF)%cgamma(1:nclasses),eigen(ilevelF)%quanta(1:nmodes), &
eigen(ilevelI)%cgamma(0),eigen(ilevelI)%krot,&
eigen(ilevelI)%cgamma(1:nclasses),eigen(ilevelI)%quanta(1:nmodes), &
linestr,A_einst,absorption_int,&
eigen(ilevelF)%ilevel,eigen(ilevelI)%ilevel,&
itransit,istored(ilevelF),normalF(1:nmodes),normalI(1:nmodes),&
linestr_deg(1:ndegI,1:ndegF)
endif
*/
printf("%11.4f\t(%i %i ) ( ",nu_if,jF,intensity.eigen[ilevelF].krot);
for(int i = 0; i < intensity.molec.nmodes+1; i++)
printf("%i ",quantaF[i]);
printf(")\t <-- \t(%i %i ) ",jI,intensity.eigen[ilevelI].krot);
for(int i = 0; i < intensity.molec.nmodes+1; i++)
printf("%i ",quantaI[i]);
printf("\t %16.8e %16.8e %16.8e\n",A_einst,line_str,orig_ls);
//exit(0);
}
}
}
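//First-half linestrength helper: device_correlate_vectors expands the symmetry-packed initial
//vector (length nsize) into the full contracted basis (length Maxcontracts), then
//device_compute_1st_half_ls_flipped_dipole contracts it with the dipole and threej arrays into
//half_ls. All work is queued on the caller-supplied stream.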
__host__ void do_1st_half_ls(cuda_bset_contrT* bset_contrI,cuda_bset_contrT* bset_contrF,int dimenMax,int idegI,int igammaI,double* dipole_me,double* vecI,double* vec,double* threej,double* half_ls,cudaStream_t stream = 0){
int blockSize = 512;
int gridSize = (int)ceil((float)dimenMax/blockSize);
device_correlate_vectors<<<gridSize,blockSize,0,stream>>>(bset_contrI,idegI,igammaI, vecI,vec);
blockSize = 64;
int numSMs;
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, 0);
device_compute_1st_half_ls_flipped_dipole<<<numSMs*40,blockSize,0,stream>>>(bset_contrI,bset_contrF,
dipole_me,vec,threej,
half_ls);
}
__host__ void do_1st_half_ls_blocks(cuda_bset_contrT* bset_contrI,cuda_bset_contrT* bset_contrF,int dimenMax,int idegI,int igammaI,double* gpu_dipole,FDipole_ptrs & dipole_me,double* vecI,double* vec,double* threej,double* half_ls,cudaStream_t stream = 0){
int blockSize = 512;
int gridSize = (int)ceil((float)dimenMax/blockSize);
device_correlate_vectors<<<gridSize,blockSize,0,stream>>>(bset_contrI,idegI,igammaI, vecI,vec);
CheckCudaError("correlate");
blockSize = 64;
gridSize = (int)ceil((float)dimenMax/blockSize);
int parts = dipole_me.parts;
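//For each dipole block: run the half-ls kernel on the block currently resident in gpu_dipole,
//then queue a copy of the next block into the same buffer on this stream (wrapping back to
//block 0 after the last part so the buffer is ready for the next initial state).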
// printf("parts = %i\n",parts);
for(int i = 0; i < parts; i++){
// printf("i=%i\n startF = %i endF = %i ncontr = %i",i, dipole_me.dip_block[i].startF, dipole_me.dip_block[i].endF ,dipole_me.dip_block[i].ncontrF );
device_compute_1st_half_ls_flipped_dipole_blocks<<<gridSize,blockSize,0,stream>>>(bset_contrI,bset_contrF,
dipole_me.dip_block[i].startF,dipole_me.dip_block[i].endF,dipole_me.dip_block[i].ncontrF,gpu_dipole,vec,threej,
half_ls); //Compute half ls
//Transfer next block
CheckCudaError("half_ls");
if((i+1) >= parts){
//printf("memcopy %i/%i\n",i+1,parts);
//printf("size=%zu\n gpu_dipole = %p dipole_me = %p \n",dipole_me.dip_block[0].size,gpu_dipole,dipole_me.dip_block[0].dipole_me);
cudaMemcpyAsync(gpu_dipole,dipole_me.dip_block[0].dipole_me,dipole_me.dip_block[0].size,cudaMemcpyHostToDevice,stream) ;
}else{
// printf("memcopy %i\n",i);
// printf("size=%zu\n gpu_dipole = %p dipole_me = %p \n",dipole_me.dip_block[i+1].size,gpu_dipole,dipole_me.dip_block[i+1].dipole_me);
cudaMemcpyAsync(gpu_dipole,dipole_me.dip_block[i+1].dipole_me,dipole_me.dip_block[i+1].size,cudaMemcpyHostToDevice,stream) ;
}
CheckCudaError("Memcpy");
//cudaDeviceSynchronize();
}
//exit(0);
//exit(0);
}
__host__ void do_1st_half_ls_branch(cuda_bset_contrT* bset_contrI,cuda_bset_contrT* bset_contrF,int dimenMax,int idegI,int igammaI,double* dipole_me,double* vecI,double* vec,double* threej,double* half_ls,cudaStream_t stream = 0){
int blockSize = 512;
int gridSize = (int)ceil((float)dimenMax/blockSize);
device_correlate_vectors<<<gridSize,blockSize,0,stream>>>(bset_contrI,idegI,igammaI, vecI,vec);
blockSize = 256;
int numSMs;
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, 0);
device_compute_1st_half_ls_flipped_dipole_branch<<<numSMs/2,blockSize,0,stream>>>(bset_contrI,bset_contrF,
dipole_me,vec,threej,
half_ls);
}
/////////////////////////////--------------------Multi-threaded versions--------------------///////////////////////////////////////////////
__host__ void dipole_initialise_cpu(FintensityJob* intensity){
printf("Begin Input\n");
read_fields(intensity);
printf("End Input\n");
int jmax = max(intensity->jvals[0],intensity->jvals[1]);
//Now create the bset_contrs
printf("Sym_nrepsres = %i\n",intensity->molec.sym_nrepres);
//Now create the bset_contrs
bset_contr_factory(&(intensity->bset_contr[0]),0,intensity->molec.sym_degen,intensity->molec.sym_nrepres);
intensity->molec.nclasses = intensity->bset_contr[0].Nclasses;
bset_contr_factory(&(intensity->bset_contr[1]),intensity->jvals[0],intensity->molec.sym_degen,intensity->molec.sym_nrepres);
bset_contr_factory(&(intensity->bset_contr[2]),intensity->jvals[1],intensity->molec.sym_degen,intensity->molec.sym_nrepres);
//Correlate them
correlate_index(intensity->bset_contr[0],intensity->bset_contr[0]);
correlate_index(intensity->bset_contr[0],intensity->bset_contr[1]);
correlate_index(intensity->bset_contr[0],intensity->bset_contr[2]);
printf("Check dipole size\n");
size_t dipole_size = GetFilenameSize("j0_extfield.chk");
if(dipole_size > max_dipole_size){
printf("Splitting dipole\n");
read_dipole_flipped_blocks(intensity->bset_contr[0],intensity->dipole_blocks,2);
intensity->split_dipole = true;
}else{
//Read the dipole
read_dipole_flipped(intensity->bset_contr[0],&(intensity->dipole_me),intensity->dip_size);
intensity->split_dipole = false;
}
printf("Computing threej\n");
//Compute threej
precompute_threej(&(intensity->threej),jmax);
//ijterms
printf("Computing ijterms\n");
compute_ijterms((intensity->bset_contr[1]),&(intensity->bset_contr[1].ijterms),intensity->molec.sym_nrepres);
compute_ijterms((intensity->bset_contr[2]),&(intensity->bset_contr[2].ijterms),intensity->molec.sym_nrepres);
//Read eigenvalues
read_eigenvalues((*intensity));
intensity->dimenmax = 0;
intensity->nsizemax = 0;
//Find nsize
for(int i =0; i < intensity->molec.sym_nrepres; i++){
if(intensity->isym_do[i]){
intensity->nsizemax= max(intensity->bset_contr[1].nsize[i],intensity->nsizemax);
intensity->nsizemax = max(intensity->bset_contr[2].nsize[i],intensity->nsizemax);
}
}
printf("Biggest vector dimension is %i \n",intensity->nsizemax);
intensity->dimenmax = max(intensity->bset_contr[1].Maxcontracts,intensity->dimenmax);
intensity->dimenmax = max(intensity->bset_contr[2].Maxcontracts,intensity->dimenmax);
printf("Biggest max contraction is %i \n",intensity->dimenmax);
printf("Find igamma pairs\n");
find_igamma_pair((*intensity));
printf("done!\n");
};
__host__ void dipole_initialise_gpu(FintensityJob * intensity, FGPU_ptrs & g_ptrs,int device_id){
int jmax = max(intensity->jvals[0],intensity->jvals[1]);
//Get available memory
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, device_id);
g_ptrs.avail_mem = size_t(double(devProp.totalGlobalMem)*0.95);
printf("Available gpu memory = %2.4f GB\n",float(g_ptrs.avail_mem)/(1024.0f*1024.0f*1024.0f));
printf("Total global memory: %zu\n", devProp.totalGlobalMem);
//Begin GPU related initialisation////////////////////////////////////////////////////////
intensity_info int_gpu;
//Copy over constants to GPU
int_gpu.sym_nrepres = intensity->molec.sym_nrepres;
int_gpu.jmax = jmax+1;
int_gpu.dip_stride_1 = intensity->bset_contr[0].Maxcontracts;
int_gpu.dip_stride_2 = intensity->bset_contr[0].Maxcontracts*intensity->bset_contr[0].Maxcontracts;
int_gpu.dimenmax = intensity->dimenmax;
int_gpu.sq2 = 1.0/sqrt(2.0);
printf("Sym max_degen = %i\n",intensity->molec.sym_maxdegen);
copy_array_to_gpu((void*)intensity->molec.sym_degen,(void**)&int_gpu.sym_degen,sizeof(int)*intensity->molec.sym_nrepres,"sym_degen");
g_ptrs.avail_mem -= sizeof(int)*intensity->molec.sym_nrepres;
CheckCudaError("Pre-initial");
printf("Copy intensity information...");
copy_intensity_info(&int_gpu);
printf("done...");
CheckCudaError("Post-initial");
printf("Copying bset_contrs to GPU...");
g_ptrs.bset_contr = new cuda_bset_contrT*[2];
g_ptrs.avail_mem -= create_and_copy_bset_contr_to_gpu(&intensity->bset_contr[1],&(g_ptrs.bset_contr[0]),intensity->bset_contr[1].ijterms,intensity->molec.sym_nrepres,intensity->molec.sym_degen);
g_ptrs.avail_mem -= create_and_copy_bset_contr_to_gpu(&intensity->bset_contr[2],&(g_ptrs.bset_contr[1]),intensity->bset_contr[2].ijterms,intensity->molec.sym_nrepres,intensity->molec.sym_degen);
printf("Done..");
printf("Copying threej...");
copy_threej_to_gpu(intensity->threej,&(g_ptrs.threej), jmax);
g_ptrs.avail_mem -=(jmax+1)*(jmax+1)*3*3*sizeof(double);
printf("done..");
/*
if(intensity->dip_size > g_ptrs.avail_mem)
{
printf("Dipole too large to fit into gpu memory, leaving on host gpu_avail = %zu dipole_size = %zu\n",g_ptrs.avail_mem,intensity->dip_size);
if(omp_get_thread_num()==0) intensity->host_dipole=true;
}else{
printf("Copying dipole...");
copy_array_to_gpu((void*)intensity->dipole_me,(void**)&(g_ptrs.dipole_me),intensity->dip_size,"dipole_me");
g_ptrs.avail_mem -=intensity->dip_size;
intensity->host_dipole=false;
}
#pragma omp barrier
if(intensity->host_dipole && omp_get_thread_num()==0){
printf("Copying dipole\n");
//double* replacement_dipole;
printf("Allocing memory....");
if(cudaSuccess != cudaHostRegister(intensity->dipole_me,intensity->dip_size,cudaHostAllocPortable | cudaHostAllocMapped | cudaHostAllocWriteCombined)) printf("Could not malloc!!!\n");
CheckCudaError("Dipole!");
printf("copying....");
//memcpy(replacement_dipole,intensity->dipole_me,intensity->dip_size);
//copy_dipole_host(intensity->dipole_me,&replacement_dipole,intensity->dip_size);
printf("Done");
//Clear dipole from memory
//delete[] intensity->dipole_me;
//Put new dipole
//intensity->dipole_me = replacement_dipole;
}
*/
intensity->host_dipole=false;
if(intensity->split_dipole){
size_t malloc_size = intensity->dipole_blocks.dip_block[0].size;
//for(int i = 0; i < intensity->dipole_blocks.parts; i++){
// malloc_size = max((unsigned long long)malloc_size,(unsigned long long)intensity->dipole_blocks.dip_block[i].size);
//}
printf("malloc size is %zu\n",malloc_size);
copy_array_to_gpu((void*)intensity->dipole_blocks.dip_block[0].dipole_me,(void**)&(g_ptrs.dipole_me),malloc_size,"dipole_me_block");
g_ptrs.avail_mem -=malloc_size ;
}else{
copy_array_to_gpu((void*)intensity->dipole_me,(void**)&(g_ptrs.dipole_me),intensity->dip_size,"dipole_me");
g_ptrs.avail_mem -=intensity->dip_size;
intensity->host_dipole=false;
}
#pragma omp barrier
printf("Left over memory is %zu bytes\n",g_ptrs.avail_mem);
printf("Done\n");
}
__host__ void dipole_do_intensities_async_omp(FintensityJob & intensity,int device_id,int num_devices){
cudaThreadExit(); // clears all the runtime state for the current thread
cudaSetDevice(device_id); //Set the device name
//Wake up the gpu//
//printf("Wake up gpu\n");
cudaFree(0);
//printf("....Done!\n");
int current_stream = 0;
int nJ = 2;
//Setup the gpu pointers
FGPU_ptrs g_ptrs;
dipole_initialise_gpu(&intensity,g_ptrs,device_id); // Initialise the gpu pointers
//Print available cpu memory
//unsigned long available_cpu_memory = intensity.cpu_memory;
size_t available_gpu_memory = g_ptrs.avail_mem;
//Compute how many initial state vectors and final state vectors
//unsigned long no_final_states_cpu = ((available_cpu_memory)/8l - long(2*intensity.dimenmax))/(3l*intensity.dimenmax);//(Initial + vec_cor + half_ls)*dimen_max
size_t no_final_states_gpu = available_gpu_memory/sizeof(double);
no_final_states_gpu -= intensity.nsizemax + intensity.dimenmax*nJ*intensity.molec.sym_maxdegen;
no_final_states_gpu /= ( intensity.nsizemax + intensity.dimenmax );
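//Memory budget in doubles: after reserving one packed initial vector (nsizemax) and the
//half-linestrength buffers (dimenmax*nJ*sym_maxdegen), each batched final state costs
//nsizemax for its packed vector plus dimenmax for its expansion in the contracted basis.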
printf("We can fit %zu states in the GPU memory\n",no_final_states_gpu);
//no_final_states_gpu /=2;
//no_final_states_gpu = 10;
printf("%zu\n",no_final_states_gpu);
no_final_states_gpu = min((unsigned int )intensity.Neigenlevels,(unsigned int )no_final_states_gpu);
printf("%zu\n",no_final_states_gpu);
//Create Stream variables/////////
cudaStream_t st_ddot_vectors[16];
cudaEvent_t st_vec_done[16];
cudaStream_t f_memcpy;
//cudaEvent_t half_ls_done = new cudaStream_t
//Half linestrength related variable
cudaStream_t* st_half_ls = new cudaStream_t[nJ*intensity.molec.sym_maxdegen]; //Concurrently run half_ls computations on this many of the half_ls's
double* gpu_half_ls;
//Create initial vector holding point
double* initial_vector = new double[intensity.nsizemax];
double* gpu_initial_vector;
//Final vectors
//Streams for each final vector computation
double* final_vectors;
cudaMallocHost(&final_vectors,sizeof(double)*intensity.nsizemax*no_final_states_gpu, cudaHostAllocWriteCombined);
//= new double[intensity.dimenmax*no_final_states_gpu]; //Pin this memory in final build
//int* vec_ilevelF = new int[no_final_states_gpu];
double* gpu_corr_vectors;
double* gpu_final_vectors;
double* check_vector = new double[intensity.dimenmax];
int** vec_ilevel_buff = new int*[2];
vec_ilevel_buff[0] = new int[no_final_states_gpu];
vec_ilevel_buff[1] = new int[no_final_states_gpu];
double* line_str; //= new double[no_final_states_gpu*intensity.molec.sym_maxdegen*intensity.molec.sym_maxdegen];
cudaMallocHost(&line_str,sizeof(double)*no_final_states_gpu*intensity.molec.sym_maxdegen*intensity.molec.sym_maxdegen);
//double* gpu_line_str;
//Track which vectors we are using
int vector_idx=0;
int vector_count=0;
int ilevel_total=0;
int ilevelF=0,start_ilevelF=0;
printf("Finished host side allocation\n");
printf("Copying initial vectors\n");
//Copy them to the gpu
copy_array_to_gpu((void*)initial_vector,(void**)&(gpu_initial_vector),sizeof(double)*intensity.nsizemax,"gpu_initial_vector");
available_gpu_memory -= sizeof(double)*intensity.nsizemax;
printf("Copying final vectors\n");
copy_array_to_gpu((void*)final_vectors,(void**)&(gpu_final_vectors),sizeof(double)*intensity.nsizemax*no_final_states_gpu,"gpu_final_vectors");
available_gpu_memory -= sizeof(double)*intensity.nsizemax*no_final_states_gpu;
printf("Create correlation vectors\n");
cudaMalloc((void**)&(gpu_corr_vectors),sizeof(double)*intensity.dimenmax*no_final_states_gpu);
CheckCudaError("Init correlation");
available_gpu_memory -= sizeof(double)*intensity.dimenmax*no_final_states_gpu;
printf("Create Half ls vector\n");
cudaMalloc((void**)&(gpu_half_ls),sizeof(double)*intensity.dimenmax*nJ*intensity.molec.sym_maxdegen);
available_gpu_memory -= sizeof(double)*intensity.dimenmax*nJ*intensity.molec.sym_maxdegen;
CheckCudaError("Init half ls");
//copy_array_to_gpu((void*)line_str,(void**)&(gpu_line_str),sizeof(double)*intensity.dimenmax*nJ*intensity.molec.sym_maxdegen,"gpu_line_str");
//A HACK to host the dipole in CPU memory, will slow stuff down considerably
/*if(intensity.host_dipole){
printf("Device pointer fun!!!");
if(cudaSuccess != cudaHostGetDevicePointer((void **)&g_ptrs.dipole_me, (void *)intensity.dipole_me, 0)){
printf("Device pointer is not fun :(!!");
}
printf("\n\n GPU-> Host pointer: %p\n",g_ptrs.dipole_me);
}*/
printf("Finished gpu copying\n");
//
//Open the eigenvector units
char filename[1024];
//Get the filename
printf("Open vector units\n");
FILE** eigenvec_unit = new FILE*[2*intensity.molec.sym_nrepres];
for(int i =0; i< 2; i++){
for(int j = 0; j < intensity.molec.sym_nrepres; j++)
{
if(intensity.isym_do[j] == false) continue;
sprintf(filename,j0eigen_vector_gamma_filebase,intensity.jvals[i],j+1);
printf("Reading %s\n",filename);
eigenvec_unit[i + j*2] = fopen(filename,"r");
if(eigenvec_unit[i + j*2] == NULL)
{
printf("error opening %s \n",filename);
exit(0);
}
}
}
//Initialise cublas
cublasHandle_t handle;
cublasStatus_t stat;
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return;
}
//Create the streams
//Intial state
for(int i = 0; i < intensity.molec.sym_maxdegen; i++)
for(int j=0; j < nJ; j++)
cudaStreamCreate(&st_half_ls[j + i*nJ]);
//Final states
cudaStreamCreate(&f_memcpy);
for(int i = 0; i < 16; i++){
cudaStreamCreate(&st_ddot_vectors[i]);
cudaEventCreate(&st_vec_done[i],cudaEventDisableTiming);
}
int last_ilevelF= 0;
//////Begin the computation//////////////////////////////////////////////////////////////////////////////
CheckCudaError("Initialisation");
//If zero then it'll progress normally; otherwise with 4 devices it will go like this:
//Thread 0 = 0 4 8 12
//Thread 1 = 1 5 9 13
//Thread 2 = 2 6 10 14
//Thread 3 = 3 7 11 15
//Run
#pragma omp barrier
if(omp_get_thread_num()==0){
printf("Linestrength S(f<-i) [Debye**2], Transition moments [Debye],Einstein coefficient A(if) [1/s],and Intensities [cm/mol]\n\n\n");
}
#pragma omp barrier
//constants
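// Added note (an interpretation, not from the original source): assuming planck, vellgt and boltz
// are in CGS units (erg*s, cm/s, erg/K), beta = h*c/(k_B*T) below has units of cm, so multiplying
// it by a term energy in cm^-1 (as done in the Boltzmann factors further down) gives a
// dimensionless exponent.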
double beta = planck * vellgt / (boltz * intensity.temperature);
double boltz_fc=0.0;
double absorption_int = 0.0;
for(int ilevelI = omp_get_thread_num(); ilevelI < intensity.Neigenlevels; ilevelI+=num_devices){
//printf("new I level!\n");
//Get the basic info we need
// printf("ilevelI = %i\n",ilevelI);
int indI = intensity.eigen[ilevelI].jind;
int dimenI = intensity.bset_contr[indI+1].Maxcontracts;
int jI = intensity.eigen[ilevelI].jval;
double energyI = intensity.eigen[ilevelI].energy;
int igammaI = intensity.eigen[ilevelI].igamma;
int * quantaI = intensity.eigen[ilevelI].quanta;
int * normalI = intensity.eigen[ilevelI].normal;
int ndegI = intensity.eigen[ilevelI].ndeg;
// printf("ilevelI=%i jI=%i energyI=%11.4f igammaI=%i ndegI=%i\n",ilevelI,jI,energyI,igammaI,ndegI);
int nsizeI = intensity.bset_contr[indI+1].nsize[igammaI];
FILE* unitI = eigenvec_unit[ indI + (igammaI)*2];
//Check filters
if(!energy_filter_lower(intensity,jI,energyI,quantaI)) continue;
//If success then read
fseek(unitI,(intensity.eigen[ilevelI].irec[0]-1)*nsizeI*sizeof(double),SEEK_SET);
fread(initial_vector,sizeof(double),nsizeI,unitI);
stat = cublasSetVector(nsizeI, sizeof(double),initial_vector, 1, gpu_initial_vector, 1);
int idegF_t = 0;
int igammaF_t = intensity.igamma_pair[igammaI];
CheckCudaError("Set Vector I");
//Do first half ls
for(int indF =0; indF < nJ; indF++){
int jF= intensity.jvals[indF];
if(!indF_filter(intensity,jI,jF,energyI,igammaI,quantaI))continue;
for(int ideg=0; ideg < ndegI; ideg++){
if(!degeneracy_filter(intensity,igammaI,igammaF_t,ideg,idegF_t)) continue;
if(intensity.split_dipole == false){
do_1st_half_ls(g_ptrs.bset_contr[indI],g_ptrs.bset_contr[indF],
intensity.dimenmax,ideg,igammaI,g_ptrs.dipole_me,gpu_initial_vector,gpu_corr_vectors + intensity.dimenmax*ideg,
g_ptrs.threej,
gpu_half_ls + indF*intensity.dimenmax + ideg*intensity.dimenmax*nJ
,st_half_ls[indF]);
}else{
do_1st_half_ls_blocks(g_ptrs.bset_contr[indI],g_ptrs.bset_contr[indF],
intensity.dimenmax,ideg,igammaI,g_ptrs.dipole_me,intensity.dipole_blocks,gpu_initial_vector,gpu_corr_vectors + intensity.dimenmax*ideg,
g_ptrs.threej,
gpu_half_ls + indF*intensity.dimenmax + ideg*intensity.dimenmax*nJ
,st_half_ls[0]);
}
}
//wait for the next batch
}
vector_idx=0;
ilevelF=0;
int current_buff = 0;
//While the half_ls is being computed, let's load up some final state vectors
while(vector_idx < no_final_states_gpu && ilevelF < intensity.Neigenlevels)
{
// !
int indF = intensity.eigen[ilevelF].jind;
// !
//printf("indF=%i",indF);
// !dimension of the bases for the initial states
// !
// !
//!energy, quanta, and degeneracy order of the initial state
// !
int jF = intensity.eigen[ilevelF].jval;
double energyF = intensity.eigen[ilevelF].energy;
int igammaF = intensity.eigen[ilevelF].igamma;
int * quantaF = intensity.eigen[ilevelF].quanta;
int * normalF = intensity.eigen[ilevelF].normal;
int nsizeF = intensity.bset_contr[indF+1].nsize[igammaF];
int irec = intensity.eigen[ilevelF].irec[0]-1;
FILE* unitF = eigenvec_unit[ indF + (igammaF)*2];
ilevelF++;
if(!energy_filter_upper(intensity,jF,energyF,quantaF)) {continue;}
if(!intensity_filter(intensity,jI,jF,energyI,energyF,igammaI,igammaF,quantaI,quantaF)) continue;
// store the level
vec_ilevel_buff[0][vector_idx] = ilevelF-1;
//printf("ilevelF=%i\n",vec_ilevel_buff[0][vector_idx]);
//Otherwise load the vector to a free slot
fseek(unitF,irec*nsizeF*sizeof(double),SEEK_SET);
fread(final_vectors + vector_idx*intensity.nsizemax,sizeof(double),nsizeF,unitF);
//Increment
vector_idx++;
}
vector_count = vector_idx;
//printf("memcopy");
//Memcopy it in one go
//cudaDeviceSynchronize();
cudaMemcpyAsync(gpu_final_vectors,final_vectors,sizeof(double)*intensity.nsizemax*vector_count,cudaMemcpyHostToDevice,f_memcpy) ;
cudaDeviceSynchronize(); //Wait till we're set up
CheckCudaError("Batch final vectors");
//printf("vector_count = %i\n",vector_count);
while(vector_count != 0)
{
int stream_count = 0;
for(int i = 0; i < vector_count; i++){
ilevelF = vec_ilevel_buff[int(current_buff)][i];
//printf("ilevelF=%i\n",ilevelF);
int indF = intensity.eigen[ilevelF].jind;
int jF = intensity.eigen[ilevelF].jval;
double energyF = intensity.eigen[ilevelF].energy;
int igammaF = intensity.eigen[ilevelF].igamma;
int * quantaF = intensity.eigen[ilevelF].quanta;
int * normalF = intensity.eigen[ilevelF].normal;
int nsizeF = intensity.bset_contr[indF+1].nsize[igammaF];
//int irec = intensity.eigen[ilevelF].irec[0]-1;
int dimenF = intensity.bset_contr[indF+1].Maxcontracts;
int ndegF = intensity.eigen[ilevelF].ndeg;
int blockSize =512;
int gridSize = (int)ceil((float)intensity.dimenmax/blockSize);
//for(int i = 0; i < ndeg
//Correlate the vectors
for(int idegF = 0; idegF < ndegF; idegF++){
//gridSize = (int)ceil((float)dimenF/blockSize);
for(int idegI=0; idegI < ndegI; idegI++)
line_str[i + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen] = 0.0;
if(intensity.reduced && idegF!=0) continue;
for(int idegI=0; idegI < ndegI; idegI++){
//line_str[i + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen]=0.0;
//line_str[i + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen] = 0.0;
if(!degeneracy_filter(intensity, igammaI,igammaF,idegI,idegF)) continue;
device_correlate_vectors<<<gridSize,blockSize,0,st_ddot_vectors[stream_count]>>>(g_ptrs.bset_contr[indF],idegF,igammaF, (gpu_final_vectors + i*intensity.nsizemax),gpu_corr_vectors + intensity.dimenmax*i);
cublasSetStream(handle,st_ddot_vectors[stream_count]);
cublasDdot (handle, dimenF,gpu_corr_vectors + intensity.dimenmax*i, 1, gpu_half_ls + indF*intensity.dimenmax + idegI*intensity.dimenmax*nJ, 1,
&line_str[i + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen]);
}
}
stream_count++;
if(stream_count >=16) stream_count=0;
}
//Record the events for synchronization
for(int i = 0; i < 16; i++){
cudaEventRecord(st_vec_done[i],st_ddot_vectors[i]);
cudaStreamWaitEvent ( f_memcpy,st_vec_done[i],0); //Make this stream wait for the event
}
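// Added note: the events recorded above gate the f_memcpy stream, so the next asynchronous
// host->device copy of final_vectors (issued below on f_memcpy) cannot overwrite
// gpu_final_vectors while the ddot streams are still reading the current batch.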
current_buff = 1-current_buff;
vector_idx = 0;
ilevelF++;
//While the line strength is being computed, let's load up some final state vectors
while(vector_idx < no_final_states_gpu && ilevelF < intensity.Neigenlevels)
{
// !
int indF = intensity.eigen[ilevelF].jind;
// !
//printf("indF=%i",indF);
// !dimension of the bases for the initial states
// !
// !
//!energy, quanta, and degeneracy order of the initial state
// !
int jF = intensity.eigen[ilevelF].jval;
double energyF = intensity.eigen[ilevelF].energy;
int igammaF = intensity.eigen[ilevelF].igamma;
int * quantaF = intensity.eigen[ilevelF].quanta;
int * normalF = intensity.eigen[ilevelF].normal;
int nsizeF = intensity.bset_contr[indF+1].nsize[igammaF];
int irec = intensity.eigen[ilevelF].irec[0]-1;
FILE* unitF = eigenvec_unit[ indF + (igammaF)*2];
ilevelF++;
if(!energy_filter_upper(intensity,jF,energyF,quantaF)) {continue;}
if(!intensity_filter(intensity,jI,jF,energyI,energyF,igammaI,igammaF,quantaI,quantaF)) continue;
// store the level
vec_ilevel_buff[current_buff][vector_idx] = ilevelF-1;
//load the vector to a free slot
fseek(unitF,irec*nsizeF*sizeof(double),SEEK_SET);
fread(final_vectors + vector_idx*intensity.nsizemax,sizeof(double),nsizeF,unitF);
//cudaMemcpyAsync(gpu_final_vectors,final_vectors,sizeof(double)*intensity.dimenmax*vector_count,cudaMemcpyHostToDevice,st_ddot_vectors[vector_idx]) ;
//Increment
vector_idx++;
}
last_ilevelF=ilevelF;
cudaMemcpyAsync(gpu_final_vectors,final_vectors,sizeof(double)*intensity.nsizemax*vector_count,cudaMemcpyHostToDevice,f_memcpy) ;
//We're done, now let's output
for(int i = 0; i < 16; i++)
cudaEventSynchronize(st_vec_done[i]); //wait for all events to be completed
for(int ivec = 0; ivec < vector_count; ivec++)
{
ilevelF = vec_ilevel_buff[1-current_buff][ivec];
//printf("ilevelF=%i\n",ilevelF);
int indF = intensity.eigen[ilevelF].jind;
int jF = intensity.eigen[ilevelF].jval;
double energyF = intensity.eigen[ilevelF].energy;
int igammaF = intensity.eigen[ilevelF].igamma;
int * quantaF = intensity.eigen[ilevelF].quanta;
int * normalF = intensity.eigen[ilevelF].normal;
int ndegF = intensity.eigen[ilevelF].ndeg;
//cudaStreamSynchronize(st_ddot_vectors[ivec]);
double ls=0.0;
double linestr=0.0;
for(int idegF=0; idegF < ndegF; idegF++){
for(int idegI=0; idegI < ndegI; idegI++){
linestr=line_str[ivec + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen];
ls +=(linestr*linestr); //line_str[ivec + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen]*line_str[ivec + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen];
}
}
ls /= double(ndegI);
if (intensity.reduced && ndegF!=1 && ndegI != 1) ls *= double(ndegI);
double final_ls = ls;
double nu_if = energyF - energyI;
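// Added note (a reading of the line below, not from the original source): this is the standard
// absorption population factor |nu_if| * exp(-E_lower/kT) * (1 - exp(-h c nu_if / kT)) / Q,
// written with beta = h*c/(k_B*T) and energies measured from the ZPE in cm^-1.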
boltz_fc = abs(nu_if) * exp(-(energyI-intensity.ZPE) * beta) * (1.0 - exp(-abs(nu_if) * beta))/ intensity.q_stat;
//Print intensities
//printf("line_str %11.4e\n",line_str);
double A_einst = A_coef_s_1*double((2*jI)+1)*final_ls*pow(abs(nu_if),3);
final_ls = final_ls * intensity.gns[igammaI] * double( (2*jI + 1)*(2 * jF + 1) );
absorption_int = final_ls * intens_cm_mol * boltz_fc;
//if(final_ls < intensity.thresh_linestrength) continue;
/*
printf("%11.4f\t(%i %i ) ( ",nu_if,jF,intensity.eigen[ilevelF].krot);
for(int i = 0; i < intensity.molec.nmodes+1; i++)
printf("%i ",quantaF[i]);
printf(")\t <-- \t(%i %i ) ",jI,intensity.eigen[ilevelI].krot);
for(int i = 0; i < intensity.molec.nmodes+1; i++)
printf("%i ",quantaI[i]);
printf("\t %16.8e %16.8e %16.8e\n",A_einst,final_ls,orig_ls);
*/
/* write(out, "( (i4, 1x, a4, 3x),'<-', (i4, 1x, a4, 3x),a1,&
&(2x, f11.4,1x),'<-',(1x, f11.4,1x),f11.4,2x,&
&'(',1x,a3,x,i3,1x,')',1x,'(',1x,<nclasses>(x,a3),1x,<nmodes>(1x, i3),1x,')',1x,'<- ', &
&'(',1x,a3,x,i3,1x,')',1x,'(',1x,<nclasses>(x,a3),1x,<nmodes>(1x, i3),1x,')',1x, &
& 3(1x, es16.8),2x,(1x,i6,1x),'<-',(1x,i6,1x),i8,1x,i8,&
1x,'(',1x,<nmodes>(1x, i3),1x,')',1x,'<- ',1x,'(',1x,<nmodes>(1x, i3),1x,')',1x,&
<nformat>(1x, es16.8))") &
!
jF,sym%label(igammaF),jI,sym%label(igammaI),branch, &
energyF-intensity%ZPE,energyI-intensity%ZPE,nu_if, &
eigen(ilevelF)%cgamma(0),eigen(ilevelF)%krot,&
eigen(ilevelF)%cgamma(1:nclasses),eigen(ilevelF)%quanta(1:nmodes), &
eigen(ilevelI)%cgamma(0),eigen(ilevelI)%krot,&
eigen(ilevelI)%cgamma(1:nclasses),eigen(ilevelI)%quanta(1:nmodes), &
linestr,A_einst,absorption_int,&
eigen(ilevelF)%ilevel,eigen(ilevelI)%ilevel,&
itransit,istored(ilevelF),normalF(1:nmodes),normalI(1:nmodes),&
linestr_deg(1:ndegI,1:ndegF)
*/
#pragma omp critical(output_ls)
{
printf("%4i %4s <-%4i %4s %1s %11.4f <- %11.4f %11.4f ( %3s %3i ) ( ",jF,intensity.molec.c_sym[igammaF],jI,intensity.molec.c_sym[igammaI],branch(jF,jI),energyF-intensity.ZPE,energyI-intensity.ZPE,abs(nu_if),intensity.eigen[ilevelF].cgamma[0],intensity.eigen[ilevelF].krot);
for(int i = 1; i <= intensity.molec.nclasses; i++)
printf(" %3s",intensity.eigen[ilevelF].cgamma[i]);
printf(" ");
for(int i = 1; i <= intensity.molec.nmodes; i++)
printf(" %3i",quantaF[i]);
printf(" ) <- ( %3s %3i ) ( ",intensity.eigen[ilevelI].cgamma[0],intensity.eigen[ilevelI].krot);
for(int i = 1; i <= intensity.molec.nclasses; i++)
printf(" %3s",intensity.eigen[ilevelI].cgamma[i]);
printf(" ");
for(int i = 1; i <= intensity.molec.nmodes; i++)
printf(" %3i",quantaI[i]);
printf(") %16.8e %16.8e %16.8e %6i <- %6i %8i %8i ( ",final_ls,A_einst,absorption_int,intensity.eigen[ilevelF].ilevel+1,intensity.eigen[ilevelI].ilevel+1,0,0);
for(int i = 1; i <= intensity.molec.nmodes; i++)
printf(" %3i",normalF[i]);
printf(" ) <- ( ");
for(int i = 1; i <= intensity.molec.nmodes; i++)
printf(" %3i",normalI[i]);
printf(" ) ");
//printf(" ) %16.9e\n",1.23456789);
for(int idegF=0; idegF < ndegF; idegF++){
for(int idegI=0; idegI < ndegI; idegI++){
printf(" %16.9e",line_str[ivec + idegI*no_final_states_gpu + idegF*no_final_states_gpu*intensity.molec.sym_maxdegen]);
}
}
printf("\n");
}
}
//return;
ilevelF=last_ilevelF+1;
//Save the new vector_count
vector_count = vector_idx;
CheckCudaError("Compute final vectors");
cudaDeviceSynchronize();
}
}
// printf("Thread =%i done",device_id);
for(int i=0; i< 2; i++){
for(int j = 0; j < intensity.molec.sym_nrepres; j++)
{
if(!intensity.isym_do[j]) continue;
if(eigenvec_unit[i + j*2]!=NULL)
fclose(eigenvec_unit[i + j*2]);
}
}
cudaDeviceReset();
cudaFreeHost(&final_vectors);
cudaFreeHost(&line_str);
}
|
a4e9576bf2a3bfe1e21a0825622d6e09483ba855.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include <ctime>
#include <cstdlib>
#include "cycleTimer.h"
// #include "linear_recurrence_h.cuh"
#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))
#define gpuErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
void gpuAssert(hipError_t code, const char *file, int line) {
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
}
}
__device__ int2 divide_work(int n_jobs, int n_workers, int worker_idx) {
// Each worker will do a continuous slice of either n_jobs / n_workers
// or ceil_div(n_jobs, n_workers). The return value is an int2 representing
// a half open interval of jobs for the worker to perform (perform jobs
// i for a <= i < b)
int cd = CEIL_DIV(n_jobs, n_workers);
int d = n_jobs / n_workers;
int doing_cd = n_jobs % n_workers;
int2 retval;
if (worker_idx < doing_cd) {
retval.x = worker_idx * cd;
retval.y = retval.x + cd;
} else {
retval.x = doing_cd * cd + (worker_idx - doing_cd) * d;
retval.y = retval.x + d;
}
return retval;
}
__device__ int2 compute_warp_start_stop(int block_idx, int warp_idx,
int n_blocks, int n_steps) {
int2 block_ss = divide_work(n_steps, n_blocks, block_idx);
int block_start = block_ss.x;
int block_stop = block_ss.y;
int block_jobs = block_stop - block_start;
int2 warp_ss = divide_work(block_jobs, 32, warp_idx);
int warp_start = block_start + warp_ss.x;
int warp_stop = block_start + warp_ss.y;
int2 retval;
retval.x = warp_start;
retval.y = warp_stop;
return retval;
}
// decay storage, h_storage:
// each a n_dims x 33 x n_blocks matrix on GPU with 33rd column for block reduction
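// Added note: concretely, the entry for feature i, warp w (0..31) of block b lives at
// index i + w * n_dims + b * 33 * n_dims, and "column" w == 32 holds that block's reduction.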
__global__ void reduction_kernel_baseline(float *decays, float *impulses,
float *initial_state,
float *_decay_storage, float *_h_storage,
int n_dims, int n_steps) {
int warp = threadIdx.x / 32;
int lane = threadIdx.x % 32;
float *decay_storage = &_decay_storage[blockIdx.x * 33 * n_dims];
float *h_storage = &_h_storage[blockIdx.x * 33 * n_dims];
int2 start_stop = compute_warp_start_stop(blockIdx.x, warp, gridDim.x, n_steps);
int warp_start = start_stop.x;
int warp_stop = start_stop.y;
/*
* Reduce within warps.
* After this loop exits, the storage arrays should contain the reduction
* from warp_start to warp_stop (including initial state) at index
* (feature_idx, warp, block).
*/
for (int i = lane; i < n_dims; i += 32) {
float cum_decay = 1.0;
float h = 0.0;
if (blockIdx.x == 0 && warp == 0 && initial_state != NULL) {
h = initial_state[i];
}
for (int t = warp_start; t < warp_stop; t++) {
cum_decay *= decays[i + t * n_dims];
h = decays[i + t * n_dims] * h + impulses[i + t * n_dims];
}
// TODO: store into shared memory, work in shared memory sized blocks
// store into global memory
decay_storage[i + warp * n_dims] = cum_decay;
h_storage[i + warp * n_dims] = h;
}
__syncthreads();
/*
* Reduce over warps.
* After this loop exits, the storage arrays should contain the reduction
* from block_start to block_finish (including initial state) at index
* (feature_idx, 32, block).
*/
// TODO: parallel reduction (or scan). Need to worry about changing the warp
// reduction values (as I use them again later)
for (int i = lane + 32 * warp; i < n_dims; i += blockDim.x) {
float cum_decay = 1.0;
float h = 0.0;
for (int t = 0; t < 32; t++) {
cum_decay *= decay_storage[i + t * n_dims];
h = decay_storage[i + t * n_dims] * h + h_storage[i + t * n_dims];
}
decay_storage[i + 32 * n_dims] = cum_decay;
h_storage[i + 32 * n_dims] = h;
}
}
__global__ void block_scan_kernel_baseline(float *decay_storage, float *h_storage,
int n_dims, int n_blocks) {
/*
* Scan over blocks.
* After this loop exits, the storage arrays should contain the cumulative sum
* from block_idx 0 to i (inclusive) at index (feature_idx, 32, i)
* This means (feature_idx, 32, 2) contains the reduction of blocks 0, 1, and 2.
*/
// TODO: parallel scan (tricky because number of blocks isn't necessarily
// smaller than number of warps that can fit in a single block)
for (int i = threadIdx.x + blockIdx.x * blockDim.x;
i < n_dims;
i += blockDim.x * gridDim.x) {
for (int t = 1; t < n_blocks; t++) {
int cur_idx = i + 32 * n_dims + t * 33 * n_dims;
int prev_idx = i + 32 * n_dims + (t - 1) * 33 * n_dims;
// TODO: remove unnecessary reads from global memory (prev_idx accesses)
h_storage[cur_idx] = decay_storage[cur_idx] * h_storage[prev_idx] + h_storage[cur_idx];
decay_storage[cur_idx] *= decay_storage[prev_idx];
}
}
}
__global__ void warp_scan_kernel_baseline(float *decays, float *impulses,
float *initial_state, float *out,
float *decay_storage, float *h_storage,
int n_dims, int n_steps) {
int warp = threadIdx.x / 32;
int lane = threadIdx.x % 32;
// Note: Due to the index ordering of the storage arrays, the following
// indices are equivalent:
//
// i + (t - 1) * n_dims + blockIdx.x * 33 * n_dims
// i + 32 * n_dims + (blockIdx.x - 1) * 33 * n_dims
//
// when t is 0. This means something that looks like negative indexing
// (t-1) can be used to safely access the stored value for the previous
// warp (even if the previous warp belonged to the previous block).
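// Added check of the identity: i + (0-1)*n_dims + b*33*n_dims
//   = i + 32*n_dims + (b-1)*33*n_dims, since 33*n_dims - n_dims = 32*n_dims.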
/*
* Scan over warps.
* After this loop executes, the storage arrays should contain the cumulative
* sum from the beginning of sequence (including initial condition) up to
* and including the indexed warp and block.
*/
// TODO: parallel scan
for (int i = lane + 32 * warp; i < n_dims; i += blockDim.x) {
for (int t = 0; t < 32; t++) {
if (t == 0 && blockIdx.x == 0) {
// the reduction over warp 0 (including initial condition) is correct val
// for scan, so there's no work to do
continue;
}
int cur_idx = i + t * n_dims + blockIdx.x * 33 * n_dims;
int prev_idx = i + (t - 1) * n_dims + blockIdx.x * 33 * n_dims;
h_storage[cur_idx] = decay_storage[cur_idx] * h_storage[prev_idx] + h_storage[cur_idx];
decay_storage[cur_idx] *= decay_storage[prev_idx];
}
}
__syncthreads();
int2 start_stop = compute_warp_start_stop(blockIdx.x, warp, gridDim.x, n_steps);
int warp_start = start_stop.x;
int warp_stop = start_stop.y;
/*
* Scan within warps.
* This loop writes to the output array. Each warp reads in its initial state
* (either from the "initial_state" or the storage arrays) and then writes
* to output for indices warp_start up to warp_stop.
*/
for (int i = lane; i < n_dims; i += 32) {
float h = 0.0;
if (blockIdx.x == 0 && warp == 0) {
if (initial_state != NULL) {
h = initial_state[i];
}
} else {
h = h_storage[i + (warp - 1) * n_dims + blockIdx.x * 33 * n_dims];
}
for (int t = warp_start; t < warp_stop; t++) {
h = decays[i + t * n_dims] * h + impulses[i + t * n_dims];
out[i + t * n_dims] = h;
}
}
}
__global__ void serial_linear_recurrence_baseline(float *decays, float *impulses,
float *initial_state, float *out,
int n_dims, int n_steps) {
// computes h_t = lambda_t * h_{t-1} + x_t
for (int dim_idx = threadIdx.x + blockIdx.x * blockDim.x;
dim_idx < n_dims;
dim_idx += blockDim.x * gridDim.x) {
float val = 0.0;
if (initial_state != NULL) {
val = initial_state[dim_idx];
}
for (int step = 0; step < n_steps; step++) {
int idx = dim_idx + step * n_dims;
val = decays[idx] * val + impulses[idx];
out[idx] = val;
}
}
}
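// Added sketch (not part of the original source): a minimal host-side reference for the same
// recurrence h_t = decay_t * h_{t-1} + impulse_t, handy for validating the GPU kernels, e.g.
// against the output of test_base(). Layout is column-major (n_dims x n_steps) as above;
// initial_state may be NULL.
static void linear_recurrence_reference(const float *decays, const float *impulses,
                                        const float *initial_state, float *out,
                                        int n_dims, int n_steps) {
  for (int i = 0; i < n_dims; i++) {
    float h = (initial_state != NULL) ? initial_state[i] : 0.0f;  // per-feature initial value
    for (int t = 0; t < n_steps; t++) {
      int idx = i + t * n_dims;                 // column-major index (feature, step)
      h = decays[idx] * h + impulses[idx];      // h_t = lambda_t * h_{t-1} + x_t
      out[idx] = h;
    }
  }
}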
extern "C" {
/*
* This is the main method for the prefix sum kernels.
* decays, impulses, out:
* each a n_dims x n_steps column major matrix located on GPU
* initial_state:
* array of size n_dims located on GPU
*/
void compute_linear_recurrence_baseline(float *decays, float *impulses, float *initial_state,
float *out, int n_dims, int n_steps) {
// TODO: query
int n_SMs = 13;
int n_blocks_per_sm = 2;
// we want at least 32 elements per block, but no reason to run
// with more than the maximum number of concurrent blocks
int n_blocks = min(CEIL_DIV(n_steps, 32), n_SMs * n_blocks_per_sm);
// TODO: make user pass in working memory? This allows integration
// with CNMeM (used by Theano)
int reduction_mem_sz = 2 * n_blocks * 33 * n_dims * sizeof(float);
float *d_reduction_mem;
gpuErrChk(hipMalloc(&d_reduction_mem, reduction_mem_sz));
float *d_decay_storage = &d_reduction_mem[0 * n_blocks * 33 * n_dims];
float *d_h_storage = &d_reduction_mem[1 * n_blocks * 33 * n_dims];
// TODO: run kernels on non-default stream?
#if DEBUG
double reduce_start = CycleTimer::currentSeconds();
#endif
hipLaunchKernelGGL(( reduction_kernel_baseline), dim3(n_blocks), dim3(1024), 0, 0, decays, impulses, initial_state,
d_decay_storage, d_h_storage,
n_dims, n_steps);
#if DEBUG
double reduce_time = CycleTimer::currentSeconds() - reduce_start;
#endif
#if DEBUG
double scan_start = CycleTimer::currentSeconds();
#endif
hipLaunchKernelGGL(( block_scan_kernel_baseline), dim3(n_blocks), dim3(1024), 0, 0, d_decay_storage, d_h_storage,
n_dims, n_blocks);
#if DEBUG
double scan_time = CycleTimer::currentSeconds() - scan_start;
#endif
#if DEBUG
double expand_start = CycleTimer::currentSeconds();
#endif
hipLaunchKernelGGL(( warp_scan_kernel_baseline), dim3(n_blocks), dim3(1024), 0, 0, decays, impulses,
initial_state, out,
d_decay_storage, d_h_storage,
n_dims, n_steps);
#if DEBUG
double expand_time = CycleTimer::currentSeconds() - expand_start;
#endif
gpuErrChk(hipFree(d_reduction_mem));
#if DEBUG
printf("BASE\n");
printf("Reduce: %.4f ms\n", 1000.f * reduce_time);
printf("Scan: %.4f ms\n", 1000.f * scan_time);
printf("Expand: %.4f ms\n", 1000.f * expand_time);
printf("TOTAL: %.4f ms\n", 1000.f * (reduce_time + scan_time + expand_time));
printf("\n");
#endif
}
void compute_serial_linear_recurrence_baseline(float *decays, float *impulses,
float *initial_state, float *out,
int n_dims, int n_steps) {
// TODO: query
int n_SMs = 13;
int n_blocks_per_sm = 2;
int n_blocks = n_SMs * n_blocks_per_sm;
#if DEBUG
double total_start = CycleTimer::currentSeconds();
#endif
hipLaunchKernelGGL(( serial_linear_recurrence_baseline), dim3(n_blocks), dim3(1024), 0, 0, decays, impulses, initial_state,
out, n_dims, n_steps);
#if DEBUG
double total_end = CycleTimer::currentSeconds();
#endif
#if DEBUG
printf("SERIAL\n");
printf("Total: %.4f ms\n", 1000.f * (total_end - total_start));
printf("\n");
#endif
}
}
float* test_base(int n_dims, int n_steps) {
// int n_dims = 2; //100;
// int n_steps = 10; //1000000;
int n_elements = n_dims * n_steps;
float *decays = (float *) calloc(n_elements, sizeof(float));
for (int i = 0; i < n_elements; i++) {
decays[i] = .9;
}
float *d_decays;
gpuErrChk(hipMalloc(&d_decays, n_elements * sizeof(float)));
gpuErrChk(hipMemcpy(d_decays, decays, n_elements * sizeof(float),
hipMemcpyHostToDevice));
float *impulses = (float *) calloc(n_elements, sizeof(float));
for (int i = 0; i < n_dims; i++) {
impulses[i + 0 * n_dims] = 2.0;
}
// printf("\nInput (decays, impulses): ");
// for(int i=0; i<n_elements; i++)
// {
// printf("(%f,%f) ", decays[i], impulses[i]);
// }
float *d_impulses;
gpuErrChk(hipMalloc(&d_impulses, n_elements * sizeof(float)));
gpuErrChk(hipMemcpy(d_impulses, impulses,
n_elements * sizeof(float), hipMemcpyHostToDevice));
float *out = (float *) calloc(n_elements, sizeof(float));
float *d_out;
gpuErrChk(hipMalloc(&d_out, n_elements * sizeof(float)));
gpuErrChk(hipMemset(d_out, 0, n_elements * sizeof(float)));
compute_linear_recurrence_baseline(d_decays, d_impulses, NULL, d_out, n_dims, n_steps);
gpuErrChk(hipMemcpy(out, d_out, n_elements * sizeof(float),
hipMemcpyDeviceToHost));
gpuErrChk(hipFree(d_decays));
gpuErrChk(hipFree(d_impulses));
gpuErrChk(hipFree(d_out));
return out;
}
void profile_serial() {
srand (static_cast <unsigned> (time(0)));
int n_steps = 16777216;
int n_dims = 32;
int n_elements = n_dims * n_steps;
int n_SMs = 13;
int n_blocks_per_sm = 2;
int n_blocks = min(CEIL_DIV(n_steps, 32), n_SMs * n_blocks_per_sm);
float *d_decays;
gpuErrChk(hipMalloc(&d_decays, n_elements * sizeof(float)));
float *d_impulses;
gpuErrChk(hipMalloc(&d_impulses, n_elements * sizeof(float)));
float *d_out;
gpuErrChk(hipMalloc(&d_out, n_elements * sizeof(float)));
float *decays = (float *)malloc(n_elements * sizeof(float));
float *impulses = (float *)malloc(n_elements * sizeof(float));
for (int i = 0; i < n_elements; i++) {
decays[i] = -2.0 + static_cast <float> (rand()) / ( static_cast <float> (RAND_MAX / 4.0));
impulses[i] = -1.0 + static_cast <float> (rand()) / ( static_cast <float> (RAND_MAX / 2.0));
}
gpuErrChk(hipMemcpy(d_decays, decays,
n_elements * sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMemcpy(d_impulses, impulses,
n_elements * sizeof(float), hipMemcpyHostToDevice));
printf("SERIAL\n");
// double total_time = 0.0;
// double total_start;
// for (int i = 0; i < n_iters; i++) {
// total_start = CycleTimer::currentSeconds();
hipLaunchKernelGGL(( serial_linear_recurrence_baseline), dim3(n_blocks), dim3(1024), 0, 0, d_decays, d_impulses, NULL,
d_out, n_dims, n_steps);
// hipDeviceSynchronize();
// total_time += CycleTimer::currentSeconds() - total_start;
// }
// printf("TOTAL: %.4f s \n", total_time);
gpuErrChk(hipFree(d_decays));
gpuErrChk(hipFree(d_impulses));
gpuErrChk(hipFree(d_out));
}
void profile_base() {
srand (static_cast <unsigned> (time(0)));
int n_steps = 16777216;
int n_dims = 32;
int n_elements = n_dims * n_steps;
int n_SMs = 13;
int n_blocks_per_sm = 2;
int n_blocks = min(CEIL_DIV(n_steps, 32), n_SMs * n_blocks_per_sm);
int reduction_mem_sz = 2 * n_blocks * 33 * n_dims * sizeof(float);
float *d_decays;
gpuErrChk(hipMalloc(&d_decays, n_elements * sizeof(float)));
float *d_impulses;
gpuErrChk(hipMalloc(&d_impulses, n_elements * sizeof(float)));
float *d_out;
gpuErrChk(hipMalloc(&d_out, n_elements * sizeof(float)));
float *decays = (float *)malloc(n_elements * sizeof(float));
float *impulses = (float *)malloc(n_elements * sizeof(float));
for (int i = 0; i < n_elements; i++) {
decays[i] = -2.0 + static_cast <float> (rand()) / ( static_cast <float> (RAND_MAX / 4.0));
impulses[i] = -1.0 + static_cast <float> (rand()) / ( static_cast <float> (RAND_MAX / 2.0));
}
gpuErrChk(hipMemcpy(d_decays, decays,
n_elements * sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMemcpy(d_impulses, impulses,
n_elements * sizeof(float), hipMemcpyHostToDevice));
float *d_reduction_mem;
gpuErrChk(hipMalloc(&d_reduction_mem, reduction_mem_sz));
float *d_decay_storage = &d_reduction_mem[0 * n_blocks * 33 * n_dims];
float *d_h_storage = &d_reduction_mem[1 * n_blocks * 33 * n_dims];
// double reduce_time = 0.f;
// double scan_time = 0.f;
// double expand_time = 0.f;
printf("BASE\n");
// double reduce_start;
// double scan_start;
// double expand_start;
// double total_start = CycleTimer::currentSeconds();
// for (int i = 0; i < n_iters; i++) {
// reduce_start = CycleTimer::currentSeconds();
hipLaunchKernelGGL(( reduction_kernel_baseline), dim3(n_blocks), dim3(1024), 0, 0, d_decays, d_impulses, NULL,
d_decay_storage, d_h_storage,
n_dims, n_steps);
// hipDeviceSynchronize();
// reduce_time += CycleTimer::currentSeconds() - reduce_start;
// scan_start = CycleTimer::currentSeconds();
hipLaunchKernelGGL(( block_scan_kernel_baseline), dim3(n_blocks), dim3(1024), 0, 0, d_decay_storage, d_h_storage,
n_dims, n_blocks);
// hipDeviceSynchronize();
// scan_time += CycleTimer::currentSeconds() - scan_start;
// expand_start = CycleTimer::currentSeconds();
hipLaunchKernelGGL(( warp_scan_kernel_baseline), dim3(n_blocks), dim3(1024), 0, 0, d_decays, d_impulses,
NULL, d_out,
d_decay_storage, d_h_storage,
n_dims, n_steps);
// hipDeviceSynchronize();
// expand_time += CycleTimer::currentSeconds() - expand_start;
// }
// double total_time = CycleTimer::currentSeconds() - total_start;
// double sum_time = reduce_time + scan_time + expand_time;
// printf("Reduce: %.4f s (%.3f)\n", reduce_time, reduce_time / sum_time);
// printf("Scan: %.4f s (%.3f)\n", scan_time, scan_time / sum_time);
// printf("Expand: %.4f s (%.3f)\n", expand_time, expand_time / sum_time);
// printf("TOTAL: %.4f s \n", total_time);
gpuErrChk(hipFree(d_reduction_mem));
gpuErrChk(hipFree(d_decays));
gpuErrChk(hipFree(d_impulses));
gpuErrChk(hipFree(d_out));
}
|
a4e9576bf2a3bfe1e21a0825622d6e09483ba855.cu
|
#include <assert.h>
#include <stdio.h>
#include <ctime>
#include <cstdlib>
#include "cycleTimer.h"
// #include "linear_recurrence_h.cuh"
#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))
#define gpuErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
void gpuAssert(cudaError_t code, const char *file, int line) {
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
}
}
__device__ int2 divide_work(int n_jobs, int n_workers, int worker_idx) {
// Each worker will do a continuous slice of either n_jobs / n_workers
// or ceil_div(n_jobs, n_workers). The return value is an int2 representing
// a half open interval of jobs for the worker to perform (perform jobs
// i for a <= i < b)
int cd = CEIL_DIV(n_jobs, n_workers);
int d = n_jobs / n_workers;
int doing_cd = n_jobs % n_workers;
int2 retval;
if (worker_idx < doing_cd) {
retval.x = worker_idx * cd;
retval.y = retval.x + cd;
} else {
retval.x = doing_cd * cd + (worker_idx - doing_cd) * d;
retval.y = retval.x + d;
}
return retval;
}
__device__ int2 compute_warp_start_stop(int block_idx, int warp_idx,
int n_blocks, int n_steps) {
int2 block_ss = divide_work(n_steps, n_blocks, block_idx);
int block_start = block_ss.x;
int block_stop = block_ss.y;
int block_jobs = block_stop - block_start;
int2 warp_ss = divide_work(block_jobs, 32, warp_idx);
int warp_start = block_start + warp_ss.x;
int warp_stop = block_start + warp_ss.y;
int2 retval;
retval.x = warp_start;
retval.y = warp_stop;
return retval;
}
// decay storage, h_storage:
// each a n_dims x 33 x n_blocks matrix on GPU with 33rd column for block reduction
__global__ void reduction_kernel_baseline(float *decays, float *impulses,
float *initial_state,
float *_decay_storage, float *_h_storage,
int n_dims, int n_steps) {
int warp = threadIdx.x / 32;
int lane = threadIdx.x % 32;
float *decay_storage = &_decay_storage[blockIdx.x * 33 * n_dims];
float *h_storage = &_h_storage[blockIdx.x * 33 * n_dims];
int2 start_stop = compute_warp_start_stop(blockIdx.x, warp, gridDim.x, n_steps);
int warp_start = start_stop.x;
int warp_stop = start_stop.y;
/*
* Reduce within warps.
* After this loop exits, the storage arrays should contain the reduction
* from warp_start to warp_stop (including initial state) at index
* (feature_idx, warp, block).
*/
for (int i = lane; i < n_dims; i += 32) {
float cum_decay = 1.0;
float h = 0.0;
if (blockIdx.x == 0 && warp == 0 && initial_state != NULL) {
h = initial_state[i];
}
for (int t = warp_start; t < warp_stop; t++) {
cum_decay *= decays[i + t * n_dims];
h = decays[i + t * n_dims] * h + impulses[i + t * n_dims];
}
// TODO: store into shared memory, work in shared memory sized blocks
// store into global memory
decay_storage[i + warp * n_dims] = cum_decay;
h_storage[i + warp * n_dims] = h;
}
__syncthreads();
/*
* Reduce over warps.
* After this loop exits, the storage arrays should contain the reduction
* from block_start to block_finish (including initial state) at index
* (feature_idx, 32, block).
*/
// TODO: parallel reduction (or scan). Need to worry about changing the warp
// reduction values (as I use them again later)
for (int i = lane + 32 * warp; i < n_dims; i += blockDim.x) {
float cum_decay = 1.0;
float h = 0.0;
for (int t = 0; t < 32; t++) {
cum_decay *= decay_storage[i + t * n_dims];
h = decay_storage[i + t * n_dims] * h + h_storage[i + t * n_dims];
}
decay_storage[i + 32 * n_dims] = cum_decay;
h_storage[i + 32 * n_dims] = h;
}
}
__global__ void block_scan_kernel_baseline(float *decay_storage, float *h_storage,
int n_dims, int n_blocks) {
/*
* Scan over blocks.
* After this loop exits, the storage arrays should contain the cumulative sum
* from block_idx 0 to i (inclusive) at index (feature_idx, 32, i)
* This means (feature_idx, 32, 2) contains the reduction of blocks 0, 1, and 2.
*/
// TODO: parallel scan (tricky because number of blocks isn't necessarily
// smaller than number of warps that can fit in a single block)
for (int i = threadIdx.x + blockIdx.x * blockDim.x;
i < n_dims;
i += blockDim.x * gridDim.x) {
for (int t = 1; t < n_blocks; t++) {
int cur_idx = i + 32 * n_dims + t * 33 * n_dims;
int prev_idx = i + 32 * n_dims + (t - 1) * 33 * n_dims;
// TODO: remove unnecessary reads from global memory (prev_idx accesses)
h_storage[cur_idx] = decay_storage[cur_idx] * h_storage[prev_idx] + h_storage[cur_idx];
decay_storage[cur_idx] *= decay_storage[prev_idx];
}
}
}
__global__ void warp_scan_kernel_baseline(float *decays, float *impulses,
float *initial_state, float *out,
float *decay_storage, float *h_storage,
int n_dims, int n_steps) {
int warp = threadIdx.x / 32;
int lane = threadIdx.x % 32;
// Note: Due to the index ordering of the storage arrays, the following
// indices are equivalent:
//
// i + (t - 1) * n_dims + blockIdx.x * 33 * n_dims
// i + 32 * n_dims + (blockIdx.x - 1) * 33 * n_dims
//
// when t is 0. This means something that looks like negative indexing
// (t-1) can be used to safely access the stored value for the previous
// warp (even if the previous warp belonged to the previous block).
/*
* Scan over warps.
* After this loop executes, the storage arrays should contain the cumulative
* sum from the beginning of sequence (including initial condition) up to
* and including the indexed warp and block.
*/
// TODO: parallel scan
for (int i = lane + 32 * warp; i < n_dims; i += blockDim.x) {
for (int t = 0; t < 32; t++) {
if (t == 0 && blockIdx.x == 0) {
// the reduction over warp 0 (including initial condition) is correct val
// for scan, so there's no work to do
continue;
}
int cur_idx = i + t * n_dims + blockIdx.x * 33 * n_dims;
int prev_idx = i + (t - 1) * n_dims + blockIdx.x * 33 * n_dims;
h_storage[cur_idx] = decay_storage[cur_idx] * h_storage[prev_idx] + h_storage[cur_idx];
decay_storage[cur_idx] *= decay_storage[prev_idx];
}
}
__syncthreads();
int2 start_stop = compute_warp_start_stop(blockIdx.x, warp, gridDim.x, n_steps);
int warp_start = start_stop.x;
int warp_stop = start_stop.y;
/*
* Scan within warps.
* This loop writes to the output array. Each warp reads in its initial state
* (either from the "initial_state" or the storage arrays) and then writes
* to output for indices warp_start up to warp_stop.
*/
for (int i = lane; i < n_dims; i += 32) {
float h = 0.0;
if (blockIdx.x == 0 && warp == 0) {
if (initial_state != NULL) {
h = initial_state[i];
}
} else {
h = h_storage[i + (warp - 1) * n_dims + blockIdx.x * 33 * n_dims];
}
for (int t = warp_start; t < warp_stop; t++) {
h = decays[i + t * n_dims] * h + impulses[i + t * n_dims];
out[i + t * n_dims] = h;
}
}
}
__global__ void serial_linear_recurrence_baseline(float *decays, float *impulses,
float *initial_state, float *out,
int n_dims, int n_steps) {
// computes h_t = lambda_t * h_{t-1} + x_t
for (int dim_idx = threadIdx.x + blockIdx.x * blockDim.x;
dim_idx < n_dims;
dim_idx += blockDim.x * gridDim.x) {
float val = 0.0;
if (initial_state != NULL) {
val = initial_state[dim_idx];
}
for (int step = 0; step < n_steps; step++) {
int idx = dim_idx + step * n_dims;
val = decays[idx] * val + impulses[idx];
out[idx] = val;
}
}
}
extern "C" {
/*
* This is the main method for the prefix sum kernels.
* decays, impulses, out:
* each a n_dims x n_steps column major matrix located on GPU
* initial_state:
* array of size n_dims located on GPU
*/
void compute_linear_recurrence_baseline(float *decays, float *impulses, float *initial_state,
float *out, int n_dims, int n_steps) {
// TODO: query
int n_SMs = 13;
int n_blocks_per_sm = 2;
// we want at least 32 elements per block, but no reason to run
// with more than the maximum number of concurrent blocks
int n_blocks = min(CEIL_DIV(n_steps, 32), n_SMs * n_blocks_per_sm);
// TODO: make user pass in working memory? This allows integration
// with CNMeM (used by Theano)
int reduction_mem_sz = 2 * n_blocks * 33 * n_dims * sizeof(float);
float *d_reduction_mem;
gpuErrChk(cudaMalloc(&d_reduction_mem, reduction_mem_sz));
float *d_decay_storage = &d_reduction_mem[0 * n_blocks * 33 * n_dims];
float *d_h_storage = &d_reduction_mem[1 * n_blocks * 33 * n_dims];
// TODO: run kernels on non-default stream?
#if DEBUG
double reduce_start = CycleTimer::currentSeconds();
#endif
reduction_kernel_baseline<<<n_blocks, 1024>>>(decays, impulses, initial_state,
d_decay_storage, d_h_storage,
n_dims, n_steps);
#if DEBUG
double reduce_time = CycleTimer::currentSeconds() - reduce_start;
#endif
#if DEBUG
double scan_start = CycleTimer::currentSeconds();
#endif
block_scan_kernel_baseline<<<n_blocks, 1024>>>(d_decay_storage, d_h_storage,
n_dims, n_blocks);
#if DEBUG
double scan_time = CycleTimer::currentSeconds() - scan_start;
#endif
#if DEBUG
double expand_start = CycleTimer::currentSeconds();
#endif
warp_scan_kernel_baseline<<<n_blocks, 1024>>>(decays, impulses,
initial_state, out,
d_decay_storage, d_h_storage,
n_dims, n_steps);
#if DEBUG
double expand_time = CycleTimer::currentSeconds() - expand_start;
#endif
gpuErrChk(cudaFree(d_reduction_mem));
#if DEBUG
printf("BASE\n");
printf("Reduce: %.4f ms\n", 1000.f * reduce_time);
printf("Scan: %.4f ms\n", 1000.f * scan_time);
printf("Expand: %.4f ms\n", 1000.f * expand_time);
printf("TOTAL: %.4f ms\n", 1000.f * (reduce_time + scan_time + expand_time));
printf("\n");
#endif
}
void compute_serial_linear_recurrence_baseline(float *decays, float *impulses,
float *initial_state, float *out,
int n_dims, int n_steps) {
// TODO: query
int n_SMs = 13;
int n_blocks_per_sm = 2;
int n_blocks = n_SMs * n_blocks_per_sm;
#if DEBUG
double total_start = CycleTimer::currentSeconds();
#endif
serial_linear_recurrence_baseline<<<n_blocks, 1024>>>(decays, impulses, initial_state,
out, n_dims, n_steps);
#if DEBUG
double total_end = CycleTimer::currentSeconds();
#endif
#if DEBUG
printf("SERIAL\n");
printf("Total: %.4f ms\n", 1000.f * (total_end - total_start));
printf("\n");
#endif
}
}
float* test_base(int n_dims, int n_steps) {
// int n_dims = 2; //100;
// int n_steps = 10; //1000000;
int n_elements = n_dims * n_steps;
float *decays = (float *) calloc(n_elements, sizeof(float));
for (int i = 0; i < n_elements; i++) {
decays[i] = .9;
}
float *d_decays;
gpuErrChk(cudaMalloc(&d_decays, n_elements * sizeof(float)));
gpuErrChk(cudaMemcpy(d_decays, decays, n_elements * sizeof(float),
cudaMemcpyHostToDevice));
float *impulses = (float *) calloc(n_elements, sizeof(float));
for (int i = 0; i < n_dims; i++) {
impulses[i + 0 * n_dims] = 2.0;
}
// printf("\nInput (decays, impulses): ");
// for(int i=0; i<n_elements; i++)
// {
// printf("(%f,%f) ", decays[i], impulses[i]);
// }
float *d_impulses;
gpuErrChk(cudaMalloc(&d_impulses, n_elements * sizeof(float)));
gpuErrChk(cudaMemcpy(d_impulses, impulses,
n_elements * sizeof(float), cudaMemcpyHostToDevice));
float *out = (float *) calloc(n_elements, sizeof(float));
float *d_out;
gpuErrChk(cudaMalloc(&d_out, n_elements * sizeof(float)));
gpuErrChk(cudaMemset(d_out, 0, n_elements * sizeof(float)));
compute_linear_recurrence_baseline(d_decays, d_impulses, NULL, d_out, n_dims, n_steps);
gpuErrChk(cudaMemcpy(out, d_out, n_elements * sizeof(float),
cudaMemcpyDeviceToHost));
gpuErrChk(cudaFree(d_decays));
gpuErrChk(cudaFree(d_impulses));
gpuErrChk(cudaFree(d_out));
return out;
}
void profile_serial() {
srand (static_cast <unsigned> (time(0)));
int n_steps = 16777216;
int n_dims = 32;
int n_elements = n_dims * n_steps;
int n_SMs = 13;
int n_blocks_per_sm = 2;
int n_blocks = min(CEIL_DIV(n_steps, 32), n_SMs * n_blocks_per_sm);
float *d_decays;
gpuErrChk(cudaMalloc(&d_decays, n_elements * sizeof(float)));
float *d_impulses;
gpuErrChk(cudaMalloc(&d_impulses, n_elements * sizeof(float)));
float *d_out;
gpuErrChk(cudaMalloc(&d_out, n_elements * sizeof(float)));
float *decays = (float *)malloc(n_elements * sizeof(float));
float *impulses = (float *)malloc(n_elements * sizeof(float));
for (int i = 0; i < n_elements; i++) {
decays[i] = -2.0 + static_cast <float> (rand()) / ( static_cast <float> (RAND_MAX / 4.0));
impulses[i] = -1.0 + static_cast <float> (rand()) / ( static_cast <float> (RAND_MAX / 2.0));
}
gpuErrChk(cudaMemcpy(d_decays, decays,
n_elements * sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMemcpy(d_impulses, impulses,
n_elements * sizeof(float), cudaMemcpyHostToDevice));
printf("SERIAL\n");
// double total_time = 0.0;
// double total_start;
// for (int i = 0; i < n_iters; i++) {
// total_start = CycleTimer::currentSeconds();
serial_linear_recurrence_baseline<<<n_blocks, 1024>>>(d_decays, d_impulses, NULL,
d_out, n_dims, n_steps);
// cudaDeviceSynchronize();
// total_time += CycleTimer::currentSeconds() - total_start;
// }
// printf("TOTAL: %.4f s \n", total_time);
gpuErrChk(cudaFree(d_decays));
gpuErrChk(cudaFree(d_impulses));
gpuErrChk(cudaFree(d_out));
}
void profile_base() {
srand (static_cast <unsigned> (time(0)));
int n_steps = 16777216;
int n_dims = 32;
int n_elements = n_dims * n_steps;
int n_SMs = 13;
int n_blocks_per_sm = 2;
int n_blocks = min(CEIL_DIV(n_steps, 32), n_SMs * n_blocks_per_sm);
int reduction_mem_sz = 2 * n_blocks * 33 * n_dims * sizeof(float);
float *d_decays;
gpuErrChk(cudaMalloc(&d_decays, n_elements * sizeof(float)));
float *d_impulses;
gpuErrChk(cudaMalloc(&d_impulses, n_elements * sizeof(float)));
float *d_out;
gpuErrChk(cudaMalloc(&d_out, n_elements * sizeof(float)));
float *decays = (float *)malloc(n_elements * sizeof(float));
float *impulses = (float *)malloc(n_elements * sizeof(float));
for (int i = 0; i < n_elements; i++) {
decays[i] = -2.0 + static_cast <float> (rand()) / ( static_cast <float> (RAND_MAX / 4.0));
impulses[i] = -1.0 + static_cast <float> (rand()) / ( static_cast <float> (RAND_MAX / 2.0));
}
gpuErrChk(cudaMemcpy(d_decays, decays,
n_elements * sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMemcpy(d_impulses, impulses,
n_elements * sizeof(float), cudaMemcpyHostToDevice));
float *d_reduction_mem;
gpuErrChk(cudaMalloc(&d_reduction_mem, reduction_mem_sz));
float *d_decay_storage = &d_reduction_mem[0 * n_blocks * 33 * n_dims];
float *d_h_storage = &d_reduction_mem[1 * n_blocks * 33 * n_dims];
// double reduce_time = 0.f;
// double scan_time = 0.f;
// double expand_time = 0.f;
printf("BASE\n");
// double reduce_start;
// double scan_start;
// double expand_start;
// double total_start = CycleTimer::currentSeconds();
// for (int i = 0; i < n_iters; i++) {
// reduce_start = CycleTimer::currentSeconds();
reduction_kernel_baseline<<<n_blocks, 1024>>>(d_decays, d_impulses, NULL,
d_decay_storage, d_h_storage,
n_dims, n_steps);
// cudaDeviceSynchronize();
// reduce_time += CycleTimer::currentSeconds() - reduce_start;
// scan_start = CycleTimer::currentSeconds();
block_scan_kernel_baseline<<<n_blocks, 1024>>>(d_decay_storage, d_h_storage,
n_dims, n_blocks);
// cudaDeviceSynchronize();
// scan_time += CycleTimer::currentSeconds() - scan_start;
// expand_start = CycleTimer::currentSeconds();
warp_scan_kernel_baseline<<<n_blocks, 1024>>>(d_decays, d_impulses,
NULL, d_out,
d_decay_storage, d_h_storage,
n_dims, n_steps);
// cudaDeviceSynchronize();
// expand_time += CycleTimer::currentSeconds() - expand_start;
// }
// double total_time = CycleTimer::currentSeconds() - total_start;
// double sum_time = reduce_time + scan_time + expand_time;
// printf("Reduce: %.4f s (%.3f)\n", reduce_time, reduce_time / sum_time);
// printf("Scan: %.4f s (%.3f)\n", scan_time, scan_time / sum_time);
// printf("Expand: %.4f s (%.3f)\n", expand_time, expand_time / sum_time);
// printf("TOTAL: %.4f s \n", total_time);
gpuErrChk(cudaFree(d_reduction_mem));
gpuErrChk(cudaFree(d_decays));
gpuErrChk(cudaFree(d_impulses));
gpuErrChk(cudaFree(d_out));
}
|
76b6ab3bd477272aaf47f67b4b9aa87d4bfba617.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ImageColumn.cuh"
#include "MNNCUDADefine.hpp"
#include "MNNCUDAFunction.cuh"
#include "Raster.cuh"
#define BLOCK_INT4 2
namespace MNN {
namespace CUDA {
__global__ void Im2Col1x1(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const float* A,
half* AP,
const int ePack,
const int eShift,
DivModFast eAlignD,
DivModFast owD,
DivModFast ohD
) {
int eAlign = matmulParam->elhPack[0] * ePack;
int lAlign = matmulParam->elhPack[1];
int maxCount = eAlign * lAlign * BLOCK_INT4;
int kernelCount = 1;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int index = indexO >> 1;
int lR = indexO & 1;
int eIndex, lIndex;
eAlignD.divmod(index, lIndex, eIndex);
int eU = eIndex >> eShift;
int eR = eIndex & (ePack-1);
int dstOffset = eU * matmulParam->elhPack[1] * (ePack * MATMULPACK) + lIndex * (ePack * MATMULPACK) + eR * MATMULPACK + lR * 8;
int4* dst = (int4*)(AP + dstOffset);
if (eIndex >= matmulParam->elh[0]) {
*dst = {0, 0, 0, 0};
continue;
}
// Compute for source
int ox, oy, ob;
owD.divmod(eIndex, oy, ox);
ohD.divmod(oy, ob, oy);
int sz = lIndex;
int sx = ox * param->strideX - param->padX;
int sy = oy * param->strideY - param->padY;
if (sx >= 0 && sx < param->iw) {
if (sy >=0 && sy < param->ih) {
int offset = sz * param->srcZStep + (ob * param->iw * param->ih + sy * param->iw + sx) * PACK_NUMBER + lR * 8;
float2* srcF = (float2*)(A + offset);
half2* dstH = (half2*)dst;
dstH[0] = __float22half2_rn(srcF[0]);
dstH[1] = __float22half2_rn(srcF[1]);
dstH[2] = __float22half2_rn(srcF[2]);
dstH[3] = __float22half2_rn(srcF[3]);
continue;
}
}
*dst = {0, 0, 0, 0};
}
}
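// Added note (inferred from the shift/mask idiom above, so treat as an assumption): these Im2Col
// kernels appear to require ePack == (1 << eShift), so that eU = eIndex >> eShift and
// eR = eIndex & (ePack - 1) reproduce eIndex / ePack and eIndex % ePack. AP is then packed as
// [eU][lIndex][eR][MATMULPACK] half elements, with lR selecting a half-vector within that pack.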
__global__ void Im2Col1x1_OPT(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const int maxCount,
const float* A,
half* AP,
const int ePack,
const int eShift,
DivModFast eAlignD,
DivModFast owD,
DivModFast ohD
) {
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int index = indexO >> 3;
int lR = indexO & 7;
int eIndex, lIndex;
eAlignD.divmod(index, lIndex, eIndex);
int eU = eIndex >> eShift;
int eR = eIndex & (ePack-1);
int dstOffset = ((eU * matmulParam->elhPack[1] + lIndex) << (4+eShift)) + (eR << 4) + (lR << 1);
int offset = lIndex * param->srcZStep + (eIndex << 4) + (lR << 1);
float2* srcF = (float2*)(A + offset);
half2* dstH = (half2*)(AP + dstOffset);
dstH[0] = __float22half2_rn(srcF[0]);
}
}
__global__ void Im2Col(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const int iBlock,
const float* A,
half* AP,
const int ePack,
const int eShift
) {
int eAlign = matmulParam->elhPack[0] * ePack;
int lAlign = matmulParam->elhPack[1];
int maxCount = eAlign * lAlign * BLOCK_INT4;
int kernelCount = param->kernelX * param->kernelY;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int index = indexO / BLOCK_INT4;
int lR = indexO % BLOCK_INT4;
int eIndex = index % eAlign;
int lIndex = index / eAlign;
int eU = eIndex / ePack;
int eR = eIndex % ePack;
int dstOffset = eU * matmulParam->elhPack[1] * (ePack * MATMULPACK) + lIndex * (ePack * MATMULPACK) + eR * MATMULPACK + lR * 8;
int4* dst = (int4*)(AP + dstOffset);
eIndex += (iBlock*matmulParam->elhPack[0]*ePack);
if (eIndex >= matmulParam->elh[0]) {
*dst = {0, 0, 0, 0};
continue;
}
// Compute for source
int ox = eIndex % param->ow;
int oy = eIndex / param->ow;
int ob = oy / param->oh;
oy = oy % param->oh;
int sz = lIndex / kernelCount;
int kI = lIndex % kernelCount;
int ksx = kI % param->kernelX;
int ksy = kI / param->kernelX;
int sx = ox * param->strideX + ksx * param->dilateX - param->padX;
int sy = oy * param->strideY + ksy * param->dilateY - param->padY;
if (sx >= 0 && sx < param->iw) {
if (sy >=0 && sy < param->ih) {
int offset = sz * param->srcZStep + (ob * param->iw * param->ih + sy * param->iw + sx) * PACK_NUMBER + lR * 8;
float2* srcF = (float2*)(A + offset);
half2* dstH = (half2*)dst;
dstH[0] = __float22half2_rn(srcF[0]);
dstH[1] = __float22half2_rn(srcF[1]);
dstH[2] = __float22half2_rn(srcF[2]);
dstH[3] = __float22half2_rn(srcF[3]);
continue;
}
}
*dst = {0, 0, 0, 0};
}
}
__global__ void Im2Col1x1_half(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const half* A,
half* AP,
const int ePack,
const int eShift,
DivModFast eAlignD,
DivModFast owD,
DivModFast ohD
) {
int eAlign = matmulParam->elhPack[0] * ePack;
int lAlign = matmulParam->elhPack[1];
int maxCount = eAlign * lAlign * BLOCK_INT4;
int kernelCount = 1;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int index = indexO / BLOCK_INT4;
int lR = indexO % BLOCK_INT4;
int eIndex, lIndex;
eAlignD.divmod(index, lIndex, eIndex);
int eU = eIndex / ePack;
int eR = eIndex % ePack;
int dstOffset = eU * matmulParam->elhPack[1] * (ePack * MATMULPACK) + lIndex * (ePack * MATMULPACK) + eR * MATMULPACK + lR * 8;
int4* dst = (int4*)(AP + dstOffset);
if (eIndex >= matmulParam->elh[0]) {
*dst = {0, 0, 0, 0};
continue;
}
// Compute for source
int ox, oy, ob;
owD.divmod(eIndex, oy, ox);
ohD.divmod(oy, ob, oy);
int sz = lIndex;
int sx = ox * param->strideX - param->padX;
int sy = oy * param->strideY - param->padY;
if (sx >= 0 && sx < param->iw) {
if (sy >=0 && sy < param->ih) {
int offset = sz * param->srcZStep + (ob * param->iw * param->ih + sy * param->iw + sx) * PACK_NUMBER + lR * 8;
int4* src = (int4*)(A + offset);
*dst = *src;
continue;
}
}
*dst = {0, 0, 0, 0};
}
}
__global__ void Im2Col1x1_half_OPT(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const int maxCount,
const half* A,
half* AP,
const int ePack,
const int eShift,
DivModFast eAlignD,
DivModFast owD,
DivModFast ohD
) {
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
size_t index = indexO >> 3;
size_t lR = indexO & 7;
int eIndex, lIndex;
eAlignD.divmod(index, lIndex, eIndex);
size_t eU = eIndex >> eShift;
size_t eR = eIndex & (ePack-1);
size_t dstOffset = ((eU * (size_t)matmulParam->elhPack[1] + (size_t)lIndex) << (4+eShift)) + (eR << 4) + (lR << 1);
size_t offset = (size_t)lIndex * (size_t)param->srcZStep + ((size_t)eIndex << 4) + (lR << 1);
int* srcF = (int*)(A + offset);
int* dstH = (int*)(AP + dstOffset);
dstH[0] = srcF[0];
}
}
__global__ void Im2Col_half(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const int maxCount,
const int iBlock,
const half* A,
half* AP,
const int ePack,
const int eShift,
DivModFast d_eA,
DivModFast d_ow,
DivModFast d_oh,
DivModFast d_fxy,
DivModFast d_fx
) {
int eAlign = matmulParam->elhPack[0] << eShift;
int lAlign = matmulParam->elhPack[1];
int kernelCount = param->kernelX * param->kernelY;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
size_t index = indexO >> 1;
size_t lR = indexO & 1;
int eIndex, lIndex;
d_eA.divmod(index, lIndex, eIndex);
size_t eU = eIndex >> eShift;
size_t eR = eIndex & (ePack-1);
size_t dstOffset = ((((eU * matmulParam->elhPack[1] + lIndex) << eShift) + eR) << 4) + (lR << 3);
int4* dst = (int4*)(AP + dstOffset);
eIndex += (iBlock*matmulParam->elhPack[0]*ePack);
if (eIndex >= matmulParam->elh[0]) {
*dst = {0, 0, 0, 0};
continue;
}
// Compute for source
int ox, oby, ob, oy, sz, kI, ksx, ksy;
d_ow.divmod(eIndex, oby, ox);
d_oh.divmod(oby, ob, oy);
d_fxy.divmod(lIndex, sz, kI);
d_fx.divmod(kI, ksy, ksx);
size_t sx = ox * param->strideX + ksx * param->dilateX - param->padX;
size_t sy = oy * param->strideY + ksy * param->dilateY - param->padY;
if (sx >= 0 && sx < param->iw) {
if (sy >=0 && sy < param->ih) {
size_t offset = sz * param->srcZStep + (((ob * param->ih + sy) * param->iw + sx) << 4) + lR * 8;
int4* src = (int4*)(A + offset);
*dst = *src;
continue;
}
}
*dst = {0, 0, 0, 0};
}
}
__global__ void Im2Col_half_OPT(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const size_t maxCount,
const int iBlock,
const half* A,
half* AP,
const int ePack,
const int eShift,
DivModFast d_eA,
DivModFast d_ow,
DivModFast d_oh,
DivModFast d_fxy,
DivModFast d_fx
) {
size_t eAlign = matmulParam->elhPack[0] << eShift;
size_t lAlign = matmulParam->elhPack[1];
size_t kernelCount = param->kernelX * param->kernelY;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
size_t index = indexO >> 2;
size_t lR = indexO & 3;
int eIndex, lIndex;
d_eA.divmod(index, lIndex, eIndex);
size_t eU = eIndex >> eShift;
size_t eR = eIndex & (ePack-1);
eIndex += (iBlock*matmulParam->elhPack[0]*ePack);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex) << eShift) + eR) << 4) + (lR << 2);
int2* dst = (int2*)(AP + dstOffset);
if (eIndex >= matmulParam->elh[0]) {
*dst = {0, 0};
continue;
}
// Compute for source
int ox, oby, ob, oy, sz, kI, ksx, ksy;
d_ow.divmod(eIndex, oby, ox);
d_oh.divmod(oby, ob, oy);
d_fxy.divmod(lIndex, sz, kI);
d_fx.divmod(kI, ksy, ksx);
size_t sx = ox * param->strideX + ksx * param->dilateX - param->padX;
size_t sy = oy * param->strideY + ksy * param->dilateY - param->padY;
if (sx >= 0 && sx < param->iw) {
if (sy >=0 && sy < param->ih) {
size_t offset = sz * param->srcZStep + (((ob * param->ih + sy) * param->iw + sx) << 4) + (lR << 2);
int2* src = (int2*)(A + offset);
*dst = *src;
continue;
}
}
*dst = {0, 0};
}
}
__global__ void Im2Col_half_3x3S1D1P1_OPT2(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const size_t maxCount,
const int iBlock,
const half* A,
half* AP,
const int ePack,
const int eShift,
DivModFast d_eA,
DivModFast d_ow,
DivModFast d_oh
) {
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
size_t index = indexO >> 3;
size_t lR = indexO & 7;
int eIndex, lIndex;
d_eA.divmod(index, lIndex, eIndex);
if (eIndex >= matmulParam->elh[0]) {
continue;
}
int ix, oby, ob, iy;
d_ow.divmod(eIndex, oby, ix);
d_oh.divmod(oby, ob, iy);
size_t sz = lIndex;
size_t offset = sz * param->srcZStep + (((ob * param->ih + iy) * param->iw + ix) << 4) + (lR << 1);
int src = *((int*)(A + offset));
// Pixel (iy-1, ix-1)
if(iy-1 >=0 && ix-1 >=0) {
size_t oeIndex = (ob * param->ih * param->iw + (iy-1) * param->iw + (ix-1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 8) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy-1 ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix-1 ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy-1, ix+0)
if(iy-1 >=0) {
size_t oeIndex = (ob * param->ih * param->iw + (iy-1) * param->iw + (ix+0));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 7) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy-1 ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy-1, ix+1)
if(iy-1 >=0 && ix+1 < param->iw) {
size_t oeIndex = (ob * param->ih * param->iw + (iy-1) * param->iw + (ix+1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 6) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy-1 ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix+1 == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy+0, ix-1)
if(ix-1 >=0) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+0) * param->iw + (ix-1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 5) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(iy == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix-1 ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy, ix)
if(1) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+0) * param->iw + (ix+0));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 4) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(iy == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy, ix+1)
if(ix+1 < param->iw) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+0) * param->iw + (ix+1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 3) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(iy == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix+1 == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy+1, ix-1)
if(iy+1 < param->ih && ix-1 >=0) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+1) * param->iw + (ix-1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 2) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy+1 == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix-1 ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy+1, ix)
if(iy+1 < param->ih) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+1) * param->iw + (ix+0));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 1) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy+1 == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
//Pixel (iy+1, ix+1)
if(iy+1 < param->ih && ix+1 < param->iw) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+1) * param->iw + (ix+1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 0) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy+1 == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix+1 == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
}
}
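// Host-side dispatcher: selects the im2col kernel variant based on kernel size,
// stride/dilation/padding, element type (fp32 vs fp16) and the alignment of
// elh[0], falling back to the generic Im2Col / Im2Col_half_OPT kernels otherwise.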
void Im2ColMain(CUDARuntime* runtime, const MatMulParam* cpuMatlMul, const MatMulParam* gpuMatMul, const ConvolutionCommon::Im2ColParameter* cpuIm2Col, const ConvolutionCommon::Im2ColParameter* gpuIm2Col,\
const Tensor* input, __half* mIm2ColBuffer, int ePack, int eShift, int bytes, int iBlock) {
const void *input_addr = (const void*)input->deviceId();
size_t eAlign = cpuMatlMul->elhPack[0] * ePack;
size_t lAlign = cpuMatlMul->elhPack[1];
DivModFast eAlignD(eAlign);
DivModFast owD(cpuIm2Col->ow);
DivModFast ohD(cpuIm2Col->oh);
if (cpuIm2Col->kernelX == 1 && cpuIm2Col->kernelY == 1 && \
cpuMatlMul->elh[0] % 16 == 0 && \
cpuIm2Col->strideX == 1 && cpuIm2Col->strideY == 1 && \
cpuIm2Col->dilateX == 1 && cpuIm2Col->dilateY == 1 && \
cpuIm2Col->padX == 0 && cpuIm2Col->padY == 0) {
size_t maxCount = eAlign * lAlign * 8;//Align 2
int block_num = runtime->blocks_num(maxCount);
int block_size = runtime->threads_num();
if(bytes == 4) {
hipLaunchKernelGGL(( Im2Col1x1_OPT), dim3(block_num), dim3(block_size), 0, 0, gpuIm2Col, gpuMatMul, maxCount,
(const float*)input_addr, mIm2ColBuffer, ePack, eShift, eAlignD, owD, ohD);
checkKernelErrors;
} else {
hipLaunchKernelGGL(( Im2Col1x1_half_OPT), dim3(block_num), dim3(block_size), 0, 0, gpuIm2Col, gpuMatMul, maxCount,
(const half*)input_addr, mIm2ColBuffer, ePack, eShift, eAlignD, owD, ohD);
checkKernelErrors;
}
} else if (cpuIm2Col->kernelX == 1 && cpuIm2Col->kernelY == 1) {
size_t maxCount = eAlign * lAlign * 2;//Align 8
int block_num = runtime->blocks_num(maxCount);
int block_size = runtime->threads_num();
if(bytes == 4) {
hipLaunchKernelGGL(( Im2Col1x1), dim3(block_num), dim3(block_size), 0, 0, gpuIm2Col, gpuMatMul, (const float*)input_addr, mIm2ColBuffer, ePack, eShift, eAlignD, owD, ohD);
checkKernelErrors;
} else {
hipLaunchKernelGGL(( Im2Col1x1_half), dim3(block_num), dim3(block_size), 0, 0, gpuIm2Col, gpuMatMul, (const half*)input_addr, mIm2ColBuffer, ePack, eShift, eAlignD, owD, ohD);
checkKernelErrors;
}
} else if(eAlign == cpuMatlMul->elh[0] && iBlock == 0 && \
cpuIm2Col->kernelX == 3 && cpuIm2Col->kernelY == 3 && \
cpuMatlMul->elh[0] % 16 == 0 && \
cpuIm2Col->strideX == 1 && cpuIm2Col->strideY == 1 && \
cpuIm2Col->dilateX == 1 && cpuIm2Col->dilateY == 1 && \
cpuIm2Col->padX == 1 && cpuIm2Col->padY == 1 && \
bytes == 2) {
size_t maxCount = eAlign * (lAlign / 9) * 8;
size_t block_num = runtime->blocks_num(maxCount);
size_t block_size = runtime->threads_num();
// printf("%d: %d-%d-%d-%d-%d, %d-%d\n", iBlock, cpuIm2Col->icDiv4, cpuIm2Col->ih, cpuIm2Col->iw, cpuIm2Col->oh, cpuIm2Col->ow, eAlign, lAlign);
hipLaunchKernelGGL(( Im2Col_half_3x3S1D1P1_OPT2), dim3(block_num), dim3(block_size), 0, 0, gpuIm2Col, gpuMatMul, maxCount, iBlock, (const half*)input_addr, mIm2ColBuffer,\
ePack, eShift, eAlignD, owD, ohD);
checkKernelErrors;
} else {
size_t maxCount = eAlign * lAlign * 2;
size_t block_num = runtime->blocks_num(maxCount);
size_t block_size = runtime->threads_num();
if(bytes == 4) {
hipLaunchKernelGGL(( Im2Col), dim3(block_num), dim3(block_size), 0, 0, gpuIm2Col, gpuMatMul, iBlock, (const float*)input_addr, mIm2ColBuffer, ePack, eShift);
checkKernelErrors;
} else {
//printf("%d-%d-%d-%d-%d, %d-%d\n", cpuIm2Col->icDiv4, cpuIm2Col->ih, cpuIm2Col->iw, cpuIm2Col->oh, cpuIm2Col->ow, eAlign, lAlign);
DivModFast fxyD((cpuIm2Col->kernelX*cpuIm2Col->kernelY));
DivModFast fxD(cpuIm2Col->kernelX);
maxCount = eAlign * lAlign * 4;
block_num = runtime->blocks_num(maxCount);
block_size = runtime->threads_num();
//Im2Col_half<<<block_num, block_size>>>(gpuIm2Col, gpuMatMul, maxCount, (const half*)input_addr, mIm2ColBuffer, eAlignD, owD, ohD, fxyD, fxD);
hipLaunchKernelGGL(( Im2Col_half_OPT), dim3(block_num), dim3(block_size), 0, 0, gpuIm2Col, gpuMatMul, maxCount, iBlock, (const half*)input_addr, mIm2ColBuffer, \
ePack, eShift, eAlignD, owD, ohD, fxyD, fxD);
checkKernelErrors;
}
}
}
} // namespace CUDA
} // namespace MNN
|
76b6ab3bd477272aaf47f67b4b9aa87d4bfba617.cu
|
#include "ImageColumn.cuh"
#include "MNNCUDADefine.hpp"
#include "MNNCUDAFunction.cuh"
#include "Raster.cuh"
#define BLOCK_INT4 2
namespace MNN {
namespace CUDA {
__global__ void Im2Col1x1(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const float* A,
half* AP,
const int ePack,
const int eShift,
DivModFast eAlignD,
DivModFast owD,
DivModFast ohD
) {
int eAlign = matmulParam->elhPack[0] * ePack;
int lAlign = matmulParam->elhPack[1];
int maxCount = eAlign * lAlign * BLOCK_INT4;
int kernelCount = 1;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int index = indexO >> 1;
int lR = indexO & 1;
int eIndex, lIndex;
eAlignD.divmod(index, lIndex, eIndex);
int eU = eIndex >> eShift;
int eR = eIndex & (ePack-1);
int dstOffset = eU * matmulParam->elhPack[1] * (ePack * MATMULPACK) + lIndex * (ePack * MATMULPACK) + eR * MATMULPACK + lR * 8;
int4* dst = (int4*)(AP + dstOffset);
if (eIndex >= matmulParam->elh[0]) {
*dst = {0, 0, 0, 0};
continue;
}
// Compute for source
int ox, oy, ob;
owD.divmod(eIndex, oy, ox);
ohD.divmod(oy, ob, oy);
int sz = lIndex;
int sx = ox * param->strideX - param->padX;
int sy = oy * param->strideY - param->padY;
if (sx >= 0 && sx < param->iw) {
if (sy >=0 && sy < param->ih) {
int offset = sz * param->srcZStep + (ob * param->iw * param->ih + sy * param->iw + sx) * PACK_NUMBER + lR * 8;
float2* srcF = (float2*)(A + offset);
half2* dstH = (half2*)dst;
dstH[0] = __float22half2_rn(srcF[0]);
dstH[1] = __float22half2_rn(srcF[1]);
dstH[2] = __float22half2_rn(srcF[2]);
dstH[3] = __float22half2_rn(srcF[3]);
continue;
}
}
*dst = {0, 0, 0, 0};
}
}
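// Fast path for 1x1 convolution with stride 1 and no padding: the im2col result
// is a straight repack of the input, so each thread converts one float2 to half2.
// The caller only selects this kernel when elh[0] is a multiple of 16, so no
// tail check is needed here.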
__global__ void Im2Col1x1_OPT(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const int maxCount,
const float* A,
half* AP,
const int ePack,
const int eShift,
DivModFast eAlignD,
DivModFast owD,
DivModFast ohD
) {
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int index = indexO >> 3;
int lR = indexO & 7;
int eIndex, lIndex;
eAlignD.divmod(index, lIndex, eIndex);
int eU = eIndex >> eShift;
int eR = eIndex & (ePack-1);
int dstOffset = ((eU * matmulParam->elhPack[1] + lIndex) << (4+eShift)) + (eR << 4) + (lR << 1);
int offset = lIndex * param->srcZStep + (eIndex << 4) + (lR << 1);
float2* srcF = (float2*)(A + offset);
half2* dstH = (half2*)(AP + dstOffset);
dstH[0] = __float22half2_rn(srcF[0]);
}
}
__global__ void Im2Col(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const int iBlock,
const float* A,
half* AP,
const int ePack,
const int eShift
) {
int eAlign = matmulParam->elhPack[0] * ePack;
int lAlign = matmulParam->elhPack[1];
int maxCount = eAlign * lAlign * BLOCK_INT4;
int kernelCount = param->kernelX * param->kernelY;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int index = indexO / BLOCK_INT4;
int lR = indexO % BLOCK_INT4;
int eIndex = index % eAlign;
int lIndex = index / eAlign;
int eU = eIndex / ePack;
int eR = eIndex % ePack;
int dstOffset = eU * matmulParam->elhPack[1] * (ePack * MATMULPACK) + lIndex * (ePack * MATMULPACK) + eR * MATMULPACK + lR * 8;
int4* dst = (int4*)(AP + dstOffset);
eIndex += (iBlock*matmulParam->elhPack[0]*ePack);
if (eIndex >= matmulParam->elh[0]) {
*dst = {0, 0, 0, 0};
continue;
}
// Compute for source
int ox = eIndex % param->ow;
int oy = eIndex / param->ow;
int ob = oy / param->oh;
oy = oy % param->oh;
int sz = lIndex / kernelCount;
int kI = lIndex % kernelCount;
int ksx = kI % param->kernelX;
int ksy = kI / param->kernelX;
int sx = ox * param->strideX + ksx * param->dilateX - param->padX;
int sy = oy * param->strideY + ksy * param->dilateY - param->padY;
if (sx >= 0 && sx < param->iw) {
if (sy >=0 && sy < param->ih) {
int offset = sz * param->srcZStep + (ob * param->iw * param->ih + sy * param->iw + sx) * PACK_NUMBER + lR * 8;
float2* srcF = (float2*)(A + offset);
half2* dstH = (half2*)dst;
dstH[0] = __float22half2_rn(srcF[0]);
dstH[1] = __float22half2_rn(srcF[1]);
dstH[2] = __float22half2_rn(srcF[2]);
dstH[3] = __float22half2_rn(srcF[3]);
continue;
}
}
*dst = {0, 0, 0, 0};
}
}
__global__ void Im2Col1x1_half(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const half* A,
half* AP,
const int ePack,
const int eShift,
DivModFast eAlignD,
DivModFast owD,
DivModFast ohD
) {
int eAlign = matmulParam->elhPack[0] * ePack;
int lAlign = matmulParam->elhPack[1];
int maxCount = eAlign * lAlign * BLOCK_INT4;
int kernelCount = 1;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int index = indexO / BLOCK_INT4;
int lR = indexO % BLOCK_INT4;
int eIndex, lIndex;
eAlignD.divmod(index, lIndex, eIndex);
int eU = eIndex / ePack;
int eR = eIndex % ePack;
int dstOffset = eU * matmulParam->elhPack[1] * (ePack * MATMULPACK) + lIndex * (ePack * MATMULPACK) + eR * MATMULPACK + lR * 8;
int4* dst = (int4*)(AP + dstOffset);
if (eIndex >= matmulParam->elh[0]) {
*dst = {0, 0, 0, 0};
continue;
}
// Compute for source
int ox, oy, ob;
owD.divmod(eIndex, oy, ox);
ohD.divmod(oy, ob, oy);
int sz = lIndex;
int sx = ox * param->strideX - param->padX;
int sy = oy * param->strideY - param->padY;
if (sx >= 0 && sx < param->iw) {
if (sy >=0 && sy < param->ih) {
int offset = sz * param->srcZStep + (ob * param->iw * param->ih + sy * param->iw + sx) * PACK_NUMBER + lR * 8;
int4* src = (int4*)(A + offset);
*dst = *src;
continue;
}
}
*dst = {0, 0, 0, 0};
}
}
__global__ void Im2Col1x1_half_OPT(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const int maxCount,
const half* A,
half* AP,
const int ePack,
const int eShift,
DivModFast eAlignD,
DivModFast owD,
DivModFast ohD
) {
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
size_t index = indexO >> 3;
size_t lR = indexO & 7;
int eIndex, lIndex;
eAlignD.divmod(index, lIndex, eIndex);
size_t eU = eIndex >> eShift;
size_t eR = eIndex & (ePack-1);
size_t dstOffset = ((eU * (size_t)matmulParam->elhPack[1] + (size_t)lIndex) << (4+eShift)) + (eR << 4) + (lR << 1);
size_t offset = (size_t)lIndex * (size_t)param->srcZStep + ((size_t)eIndex << 4) + (lR << 1);
int* srcF = (int*)(A + offset);
int* dstH = (int*)(AP + dstOffset);
dstH[0] = srcF[0];
}
}
__global__ void Im2Col_half(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const int maxCount,
const int iBlock,
const half* A,
half* AP,
const int ePack,
const int eShift,
DivModFast d_eA,
DivModFast d_ow,
DivModFast d_oh,
DivModFast d_fxy,
DivModFast d_fx
) {
int eAlign = matmulParam->elhPack[0] << eShift;
int lAlign = matmulParam->elhPack[1];
int kernelCount = param->kernelX * param->kernelY;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
size_t index = indexO >> 1;
size_t lR = indexO & 1;
int eIndex, lIndex;
d_eA.divmod(index, lIndex, eIndex);
size_t eU = eIndex >> eShift;
size_t eR = eIndex & (ePack-1);
size_t dstOffset = ((((eU * matmulParam->elhPack[1] + lIndex) << eShift) + eR) << 4) + (lR << 3);
int4* dst = (int4*)(AP + dstOffset);
eIndex += (iBlock*matmulParam->elhPack[0]*ePack);
if (eIndex >= matmulParam->elh[0]) {
*dst = {0, 0, 0, 0};
continue;
}
// Compute for source
int ox, oby, ob, oy, sz, kI, ksx, ksy;
d_ow.divmod(eIndex, oby, ox);
d_oh.divmod(oby, ob, oy);
d_fxy.divmod(lIndex, sz, kI);
d_fx.divmod(kI, ksy, ksx);
size_t sx = ox * param->strideX + ksx * param->dilateX - param->padX;
size_t sy = oy * param->strideY + ksy * param->dilateY - param->padY;
if (sx >= 0 && sx < param->iw) {
if (sy >=0 && sy < param->ih) {
size_t offset = sz * param->srcZStep + (((ob * param->ih + sy) * param->iw + sx) << 4) + lR * 8;
int4* src = (int4*)(A + offset);
*dst = *src;
continue;
}
}
*dst = {0, 0, 0, 0};
}
}
__global__ void Im2Col_half_OPT(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const size_t maxCount,
const int iBlock,
const half* A,
half* AP,
const int ePack,
const int eShift,
DivModFast d_eA,
DivModFast d_ow,
DivModFast d_oh,
DivModFast d_fxy,
DivModFast d_fx
) {
size_t eAlign = matmulParam->elhPack[0] << eShift;
size_t lAlign = matmulParam->elhPack[1];
size_t kernelCount = param->kernelX * param->kernelY;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
size_t index = indexO >> 2;
size_t lR = indexO & 3;
int eIndex, lIndex;
d_eA.divmod(index, lIndex, eIndex);
size_t eU = eIndex >> eShift;
size_t eR = eIndex & (ePack-1);
eIndex += (iBlock*matmulParam->elhPack[0]*ePack);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex) << eShift) + eR) << 4) + (lR << 2);
int2* dst = (int2*)(AP + dstOffset);
if (eIndex >= matmulParam->elh[0]) {
*dst = {0, 0};
continue;
}
// Compute for source
int ox, oby, ob, oy, sz, kI, ksx, ksy;
d_ow.divmod(eIndex, oby, ox);
d_oh.divmod(oby, ob, oy);
d_fxy.divmod(lIndex, sz, kI);
d_fx.divmod(kI, ksy, ksx);
size_t sx = ox * param->strideX + ksx * param->dilateX - param->padX;
size_t sy = oy * param->strideY + ksy * param->dilateY - param->padY;
if (sx >= 0 && sx < param->iw) {
if (sy >=0 && sy < param->ih) {
size_t offset = sz * param->srcZStep + (((ob * param->ih + sy) * param->iw + sx) << 4) + (lR << 2);
int2* src = (int2*)(A + offset);
*dst = *src;
continue;
}
}
*dst = {0, 0};
}
}
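// Specialized im2col for the 3x3, stride 1, dilation 1, pad 1, fp16 case:
// each thread loads one fp16x2 element of the input once and scatters it into
// the (up to nine) im2col slots it contributes to, writing zeros for filter
// taps that fall outside the image border.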
__global__ void Im2Col_half_3x3S1D1P1_OPT2(const ConvolutionCommon::Im2ColParameter* param,
const MatMulParam* matmulParam,
const size_t maxCount,
const int iBlock,
const half* A,
half* AP,
const int ePack,
const int eShift,
DivModFast d_eA,
DivModFast d_ow,
DivModFast d_oh
) {
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
size_t index = indexO >> 3;
size_t lR = indexO & 7;
int eIndex, lIndex;
d_eA.divmod(index, lIndex, eIndex);
if (eIndex >= matmulParam->elh[0]) {
continue;
}
int ix, oby, ob, iy;
d_ow.divmod(eIndex, oby, ix);
d_oh.divmod(oby, ob, iy);
size_t sz = lIndex;
size_t offset = sz * param->srcZStep + (((ob * param->ih + iy) * param->iw + ix) << 4) + (lR << 1);
int src = *((int*)(A + offset));
// Pixel (iy-1, ix-1)
if(iy-1 >=0 && ix-1 >=0) {
size_t oeIndex = (ob * param->ih * param->iw + (iy-1) * param->iw + (ix-1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 8) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy-1 ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix-1 ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy-1, ix+0)
if(iy-1 >=0) {
size_t oeIndex = (ob * param->ih * param->iw + (iy-1) * param->iw + (ix+0));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 7) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy-1 ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy-1, ix+1)
if(iy-1 >=0 && ix+1 < param->iw) {
size_t oeIndex = (ob * param->ih * param->iw + (iy-1) * param->iw + (ix+1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 6) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy-1 ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix+1 == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy+0, ix-1)
if(ix-1 >=0) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+0) * param->iw + (ix-1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 5) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(iy == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix-1 ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy, ix)
if(1) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+0) * param->iw + (ix+0));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 4) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(iy == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy, ix+1)
if(ix+1 < param->iw) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+0) * param->iw + (ix+1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 3) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy ==0) {
size_t index[3] = {0, 1, 2};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(iy == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix+1 == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy+1, ix-1)
if(iy+1 < param->ih && ix-1 >=0) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+1) * param->iw + (ix-1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 2) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy+1 == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix-1 ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
// Pixel (iy+1, ix)
if(iy+1 < param->ih) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+1) * param->iw + (ix+0));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 1) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy+1 == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix ==0) {
size_t index[3] = {0, 3, 6};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
//Pixel (iy+1, ix+1)
if(iy+1 < param->ih && ix+1 < param->iw) {
size_t oeIndex = (ob * param->ih * param->iw + (iy+1) * param->iw + (ix+1));
size_t eU = oeIndex >> eShift;
size_t eR = oeIndex & (ePack-1);
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + 0) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = src;
// Corner case
if(iy+1 == param->ih-1) {
size_t index[3] = {6, 7, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
if(ix+1 == param->iw-1) {
size_t index[3] = {2, 5, 8};
for(size_t i=0; i<3; i++) {
size_t dstOffset = ((((eU * (size_t)matmulParam->elhPack[1] + lIndex*9 + index[i]) << eShift) + eR) << 4) + (lR << 1);
int* dst = (int*)(AP + dstOffset);
*dst = 0;
}
}
}
}
}
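// Host-side dispatcher: selects the im2col kernel variant based on kernel size,
// stride/dilation/padding, element type (fp32 vs fp16) and the alignment of
// elh[0], falling back to the generic Im2Col / Im2Col_half_OPT kernels otherwise.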
void Im2ColMain(CUDARuntime* runtime, const MatMulParam* cpuMatlMul, const MatMulParam* gpuMatMul, const ConvolutionCommon::Im2ColParameter* cpuIm2Col, const ConvolutionCommon::Im2ColParameter* gpuIm2Col,\
const Tensor* input, __half* mIm2ColBuffer, int ePack, int eShift, int bytes, int iBlock) {
const void *input_addr = (const void*)input->deviceId();
size_t eAlign = cpuMatlMul->elhPack[0] * ePack;
size_t lAlign = cpuMatlMul->elhPack[1];
DivModFast eAlignD(eAlign);
DivModFast owD(cpuIm2Col->ow);
DivModFast ohD(cpuIm2Col->oh);
if (cpuIm2Col->kernelX == 1 && cpuIm2Col->kernelY == 1 && \
cpuMatlMul->elh[0] % 16 == 0 && \
cpuIm2Col->strideX == 1 && cpuIm2Col->strideY == 1 && \
cpuIm2Col->dilateX == 1 && cpuIm2Col->dilateY == 1 && \
cpuIm2Col->padX == 0 && cpuIm2Col->padY == 0) {
size_t maxCount = eAlign * lAlign * 8;//Align 2
int block_num = runtime->blocks_num(maxCount);
int block_size = runtime->threads_num();
if(bytes == 4) {
Im2Col1x1_OPT<<<block_num, block_size>>>(gpuIm2Col, gpuMatMul, maxCount,
(const float*)input_addr, mIm2ColBuffer, ePack, eShift, eAlignD, owD, ohD);
checkKernelErrors;
} else {
Im2Col1x1_half_OPT<<<block_num, block_size>>>(gpuIm2Col, gpuMatMul, maxCount,
(const half*)input_addr, mIm2ColBuffer, ePack, eShift, eAlignD, owD, ohD);
checkKernelErrors;
}
} else if (cpuIm2Col->kernelX == 1 && cpuIm2Col->kernelY == 1) {
size_t maxCount = eAlign * lAlign * 2;//Align 8
int block_num = runtime->blocks_num(maxCount);
int block_size = runtime->threads_num();
if(bytes == 4) {
Im2Col1x1<<<block_num, block_size>>>(gpuIm2Col, gpuMatMul, (const float*)input_addr, mIm2ColBuffer, ePack, eShift, eAlignD, owD, ohD);
checkKernelErrors;
} else {
Im2Col1x1_half<<<block_num, block_size>>>(gpuIm2Col, gpuMatMul, (const half*)input_addr, mIm2ColBuffer, ePack, eShift, eAlignD, owD, ohD);
checkKernelErrors;
}
} else if(eAlign == cpuMatlMul->elh[0] && iBlock == 0 && \
cpuIm2Col->kernelX == 3 && cpuIm2Col->kernelY == 3 && \
cpuMatlMul->elh[0] % 16 == 0 && \
cpuIm2Col->strideX == 1 && cpuIm2Col->strideY == 1 && \
cpuIm2Col->dilateX == 1 && cpuIm2Col->dilateY == 1 && \
cpuIm2Col->padX == 1 && cpuIm2Col->padY == 1 && \
bytes == 2) {
size_t maxCount = eAlign * (lAlign / 9) * 8;
size_t block_num = runtime->blocks_num(maxCount);
size_t block_size = runtime->threads_num();
// printf("%d: %d-%d-%d-%d-%d, %d-%d\n", iBlock, cpuIm2Col->icDiv4, cpuIm2Col->ih, cpuIm2Col->iw, cpuIm2Col->oh, cpuIm2Col->ow, eAlign, lAlign);
Im2Col_half_3x3S1D1P1_OPT2<<<block_num, block_size>>>(gpuIm2Col, gpuMatMul, maxCount, iBlock, (const half*)input_addr, mIm2ColBuffer,\
ePack, eShift, eAlignD, owD, ohD);
checkKernelErrors;
} else {
size_t maxCount = eAlign * lAlign * 2;
size_t block_num = runtime->blocks_num(maxCount);
size_t block_size = runtime->threads_num();
if(bytes == 4) {
Im2Col<<<block_num, block_size>>>(gpuIm2Col, gpuMatMul, iBlock, (const float*)input_addr, mIm2ColBuffer, ePack, eShift);
checkKernelErrors;
} else {
//printf("%d-%d-%d-%d-%d, %d-%d\n", cpuIm2Col->icDiv4, cpuIm2Col->ih, cpuIm2Col->iw, cpuIm2Col->oh, cpuIm2Col->ow, eAlign, lAlign);
DivModFast fxyD((cpuIm2Col->kernelX*cpuIm2Col->kernelY));
DivModFast fxD(cpuIm2Col->kernelX);
maxCount = eAlign * lAlign * 4;
block_num = runtime->blocks_num(maxCount);
block_size = runtime->threads_num();
//Im2Col_half<<<block_num, block_size>>>(gpuIm2Col, gpuMatMul, maxCount, (const half*)input_addr, mIm2ColBuffer, eAlignD, owD, ohD, fxyD, fxD);
Im2Col_half_OPT<<<block_num, block_size>>>(gpuIm2Col, gpuMatMul, maxCount, iBlock, (const half*)input_addr, mIm2ColBuffer, \
ePack, eShift, eAlignD, owD, ohD, fxyD, fxD);
checkKernelErrors;
}
}
}
} // namespace CUDA
} // namespace MNN
|
2998309a3007a18da347de9f7c074399c956e3bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#define TILE_WIDTH 2
__global__ void matMulKernel(float* d_N, float* d_M, float* d_P, int Width){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the d_M and d_N tiles required to compute d_P element
for (int m = 0; m < Width/TILE_WIDTH; ++m) { // Collaborative loading of d_M and d_N tiles into shared memory
Mds[ty][tx] = d_M[Row*Width + m*TILE_WIDTH + tx];
Nds[ty][tx] = d_N[(m*TILE_WIDTH + ty)*Width + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
d_P[Row*Width + Col] = Pvalue;
}
void matMul(float* A, float* B, float* C, int width)
{
int size = width * width * sizeof(float);
static float *d_A, *d_B, *d_C;
hipMalloc((void **) &d_A, size);
hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_B, size);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_C, size);
dim3 dimGrid(2, 2, 1);
dim3 dimBlock(2, 2, 1);
hipLaunchKernelGGL(( matMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, width);
hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost);
printf("\nA: \n");
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++){
printf("%2.0f ", A[i + j*width]);
}
printf("\n");
}
printf("\nB: \n");
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++){
printf("%2.0f ", B[i + j*width]);
}
printf("\n");
}
printf("\n-------------------------------------");
printf("\nC: \n");
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++){
printf("%2.0f ", C[i + j*width]);
}
printf("\n");
}
printf("\n-------------------------------------\n");
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
int main() {
int width = 4;
static float h_A[16];
static float h_B[16];
static float h_C[16];
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
h_A[i + j*width] = (i+j)%2;
h_B[i + j*width] = (i+j)%3;
}
}
matMul(h_A, h_B, h_C, width);
}
|
2998309a3007a18da347de9f7c074399c956e3bb.cu
|
#include <cuda.h>
#include <stdio.h>
#define TILE_WIDTH 2
__global__ void matMulKernel(float* d_N, float* d_M, float* d_P, int Width){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the d_M and d_N tiles required to compute d_P element
for (int m = 0; m < Width/TILE_WIDTH; ++m) { // Collaborative loading of d_M and d_N tiles into shared memory
Mds[ty][tx] = d_M[Row*Width + m*TILE_WIDTH + tx];
Nds[ty][tx] = d_N[(m*TILE_WIDTH + ty)*Width + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
d_P[Row*Width + Col] = Pvalue;
}
void matMul(float* A, float* B, float* C, int width)
{
int size = width * width * sizeof(float);
static float *d_A, *d_B, *d_C;
cudaMalloc((void **) &d_A, size);
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_C, size);
dim3 dimGrid(2, 2, 1);
dim3 dimBlock(2, 2, 1);
matMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, width);
cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
printf("\nA: \n");
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++){
printf("%2.0f ", A[i + j*width]);
}
printf("\n");
}
printf("\nB: \n");
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++){
printf("%2.0f ", B[i + j*width]);
}
printf("\n");
}
printf("\n-------------------------------------");
printf("\nC: \n");
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++){
printf("%2.0f ", C[i + j*width]);
}
printf("\n");
}
printf("\n-------------------------------------\n");
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
int main() {
int width = 4;
static float h_A[16];
static float h_B[16];
static float h_C[16];
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
h_A[i + j*width] = (i+j)%2;
h_B[i + j*width] = (i+j)%3;
}
}
matMul(h_A, h_B, h_C, width);
}
|
565e5451fc24946bc0e849ce4fb75d8ac4b35174.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2018 by Contributors
* \file boolean_mask.cu
*/
#include "./boolean_mask-inl.h"
#include <hipcub/hipcub.hpp>
namespace mxnet {
namespace op {
template<>
inline void BooleanMaskForward<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
CHECK(req[0] == kWriteTo || req[0] == kWriteInplace);
const BooleanMaskParam& param = nnvm::get<BooleanMaskParam>(attrs.parsed);
const int axis = param.axis;
const NDArray &data = inputs[0];
const NDArray &idx = inputs[1];
const NDArray &out = outputs[0];
CHECK_EQ(axis, 0) << "Not supported yet";
CHECK_EQ(data.shape()[axis], idx.shape()[0]);
CHECK_EQ(idx.shape().ndim(), 1U);
Stream<gpu>* s = ctx.get_stream<gpu>();
hipStream_t stream = Stream<gpu>::GetStream(s);
// count the number of 1s in `idx`, so that we could know the output dimension
size_t idx_size = idx.shape()[0];
int32_t valid_num = 0;
int32_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// Calculate total temporary memory size
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size,
stream);
size_t buffer_size = idx_size * sizeof(int32_t);
temp_storage_bytes += buffer_size;
// Allocate memory on GPU and allocate pointer
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(temp_storage_bytes), s);
prefix_sum = reinterpret_cast<int32_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + buffer_size;
MSHADOW_TYPE_SWITCH_WITH_BOOL(idx.dtype(), IType, {
mxnet_op::Kernel<mshadow_op::identity_with_cast, gpu>::Launch(
s, idx.shape()[0], prefix_sum, idx.data().dptr<IType>());
});
// Calculate prefix sum
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size,
stream);
CUDA_CALL(hipMemcpyAsync(&valid_num, &prefix_sum[idx_size - 1], sizeof(int32_t),
hipMemcpyDeviceToHost, stream));
CUDA_CALL(hipStreamSynchronize(stream));
// Set the output shape forcefully
mxnet::TShape data_shape = data.shape();
data_shape[axis] = valid_num;
const_cast<NDArray &>(out).Init(data_shape);
size_t input_size = data.shape().Size();
size_t col_size = input_size / idx.shape()[0];
// Do the copy
MSHADOW_TYPE_SWITCH_WITH_BOOL(out.dtype(), DType, {
if (valid_num > 0) {
mxnet_op::Kernel<BooleanMaskForwardKernel, gpu>::Launch(
s, input_size, out.data().dptr<DType>(), data.data().dptr<DType>(), prefix_sum, col_size);
}
});
}
template<>
inline void BooleanMaskBackward<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 2U);
if (req[0] == kNullOp) return;
// inputs: {ograd, data, idx}
// outputs: {igrad_data, igrad_idx}
const NDArray& ograd = inputs[0];
const NDArray& idx = inputs[2];
const NDArray& igrad_data = outputs[0];
Stream<gpu>* s = ctx.get_stream<gpu>();
hipStream_t stream = Stream<gpu>::GetStream(s);
// Count the number of 1s in `idx`, so that we could know the output dimension
size_t idx_size = idx.shape()[0];
int32_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// Calculate total temporary memory size
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size,
stream);
size_t buffer_size = idx_size * sizeof(int32_t);
temp_storage_bytes += buffer_size;
// Allocate memory on GPU and allocate pointer
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(temp_storage_bytes), s);
prefix_sum = reinterpret_cast<int32_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + buffer_size;
MSHADOW_TYPE_SWITCH_WITH_BOOL(idx.dtype(), IType, {
mxnet_op::Kernel<mshadow_op::identity_with_cast, gpu>::Launch(
s, idx.shape()[0], prefix_sum, idx.data().dptr<IType>());
});
// Calculate prefix sum
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size,
stream);
size_t input_size = igrad_data.shape().Size();
size_t col_size = input_size / idx_size;
// Backward pass
MSHADOW_TYPE_SWITCH(igrad_data.dtype(), DType, {
if (input_size > 0) {
mxnet_op::Kernel<BooleanMaskBackwardKernel, gpu>::Launch(
s, input_size, igrad_data.data().dptr<DType>(), req[0], ograd.data().dptr<DType>(),
prefix_sum, col_size);
}
});
}
NNVM_REGISTER_OP(_contrib_boolean_mask)
.set_attr<FResourceRequest>("FResourceRequest",
[](const NodeAttrs& attrs) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr<THasDeterministicOutput>("THasDeterministicOutput", true)
.set_attr<FComputeEx>("FComputeEx<gpu>", BooleanMaskForward<gpu>);
NNVM_REGISTER_OP(_backward_contrib_boolean_mask)
.set_attr<FResourceRequest>("FResourceRequest",
[](const NodeAttrs& attrs) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr<FComputeEx>("FComputeEx<gpu>", BooleanMaskBackward<gpu>);
} // namespace op
} // namespace mxnet
|
565e5451fc24946bc0e849ce4fb75d8ac4b35174.cu
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2018 by Contributors
* \file boolean_mask.cu
*/
#include "./boolean_mask-inl.h"
#include <cub/cub.cuh>
namespace mxnet {
namespace op {
template<>
inline void BooleanMaskForward<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
CHECK(req[0] == kWriteTo || req[0] == kWriteInplace);
const BooleanMaskParam& param = nnvm::get<BooleanMaskParam>(attrs.parsed);
const int axis = param.axis;
const NDArray &data = inputs[0];
const NDArray &idx = inputs[1];
const NDArray &out = outputs[0];
CHECK_EQ(axis, 0) << "Not supported yet";
CHECK_EQ(data.shape()[axis], idx.shape()[0]);
CHECK_EQ(idx.shape().ndim(), 1U);
Stream<gpu>* s = ctx.get_stream<gpu>();
cudaStream_t stream = Stream<gpu>::GetStream(s);
// count the number of 1s in `idx`, so that we could know the output dimension
size_t idx_size = idx.shape()[0];
int32_t valid_num = 0;
int32_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// Calculate total temporary memory size
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size,
stream);
size_t buffer_size = idx_size * sizeof(int32_t);
temp_storage_bytes += buffer_size;
// Allocate memory on GPU and allocate pointer
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(temp_storage_bytes), s);
prefix_sum = reinterpret_cast<int32_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + buffer_size;
MSHADOW_TYPE_SWITCH_WITH_BOOL(idx.dtype(), IType, {
mxnet_op::Kernel<mshadow_op::identity_with_cast, gpu>::Launch(
s, idx.shape()[0], prefix_sum, idx.data().dptr<IType>());
});
// Calculate prefix sum
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size,
stream);
CUDA_CALL(cudaMemcpyAsync(&valid_num, &prefix_sum[idx_size - 1], sizeof(int32_t),
cudaMemcpyDeviceToHost, stream));
CUDA_CALL(cudaStreamSynchronize(stream));
// Set the output shape forcefully
mxnet::TShape data_shape = data.shape();
data_shape[axis] = valid_num;
const_cast<NDArray &>(out).Init(data_shape);
size_t input_size = data.shape().Size();
size_t col_size = input_size / idx.shape()[0];
// Do the copy
MSHADOW_TYPE_SWITCH_WITH_BOOL(out.dtype(), DType, {
if (valid_num > 0) {
mxnet_op::Kernel<BooleanMaskForwardKernel, gpu>::Launch(
s, input_size, out.data().dptr<DType>(), data.data().dptr<DType>(), prefix_sum, col_size);
}
});
}
template<>
inline void BooleanMaskBackward<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 2U);
if (req[0] == kNullOp) return;
// inputs: {ograd, data, idx}
// outputs: {igrad_data, igrad_idx}
const NDArray& ograd = inputs[0];
const NDArray& idx = inputs[2];
const NDArray& igrad_data = outputs[0];
Stream<gpu>* s = ctx.get_stream<gpu>();
cudaStream_t stream = Stream<gpu>::GetStream(s);
// Count the number of 1s in `idx`, so that we could know the output dimension
size_t idx_size = idx.shape()[0];
int32_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// Calculate total temporary memory size
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size,
stream);
size_t buffer_size = idx_size * sizeof(int32_t);
temp_storage_bytes += buffer_size;
// Allocate memory on GPU and allocate pointer
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(temp_storage_bytes), s);
prefix_sum = reinterpret_cast<int32_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + buffer_size;
MSHADOW_TYPE_SWITCH_WITH_BOOL(idx.dtype(), IType, {
mxnet_op::Kernel<mshadow_op::identity_with_cast, gpu>::Launch(
s, idx.shape()[0], prefix_sum, idx.data().dptr<IType>());
});
// Calculate prefix sum
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size,
stream);
size_t input_size = igrad_data.shape().Size();
size_t col_size = input_size / idx_size;
// Backward pass
MSHADOW_TYPE_SWITCH(igrad_data.dtype(), DType, {
if (input_size > 0) {
mxnet_op::Kernel<BooleanMaskBackwardKernel, gpu>::Launch(
s, input_size, igrad_data.data().dptr<DType>(), req[0], ograd.data().dptr<DType>(),
prefix_sum, col_size);
}
});
}
NNVM_REGISTER_OP(_contrib_boolean_mask)
.set_attr<FResourceRequest>("FResourceRequest",
[](const NodeAttrs& attrs) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr<THasDeterministicOutput>("THasDeterministicOutput", true)
.set_attr<FComputeEx>("FComputeEx<gpu>", BooleanMaskForward<gpu>);
NNVM_REGISTER_OP(_backward_contrib_boolean_mask)
.set_attr<FResourceRequest>("FResourceRequest",
[](const NodeAttrs& attrs) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr<FComputeEx>("FComputeEx<gpu>", BooleanMaskBackward<gpu>);
} // namespace op
} // namespace mxnet
|
c70f426da4d8884ccff4b2ac85ad021220cdf881.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by search_single_cta_00_generate.py
*
* Make changes there and run in this directory:
*
* > python search_single_cta_00_generate.py
*
*/
#include <raft/neighbors/detail/cagra/search_single_cta_kernel-inl.cuh>
namespace raft::neighbors::cagra::detail::single_cta_search {
#define instantiate_single_cta_select_and_run( \
TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T) \
template void select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>( \
raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, \
raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, \
INDEX_T* const topk_indices_ptr, \
DISTANCE_T* const topk_distances_ptr, \
const DATA_T* const queries_ptr, \
const uint32_t num_queries, \
const INDEX_T* dev_seed_ptr, \
uint32_t* const num_executed_iterations, \
uint32_t topk, \
uint32_t num_itopk_candidates, \
uint32_t block_size, \
uint32_t smem_size, \
int64_t hash_bitlen, \
INDEX_T* hashmap_ptr, \
size_t small_hash_bitlen, \
size_t small_hash_reset_interval, \
uint32_t num_random_samplings, \
uint64_t rand_xor_mask, \
uint32_t num_seeds, \
size_t itopk_size, \
size_t search_width, \
size_t min_iterations, \
size_t max_iterations, \
hipStream_t stream);
instantiate_single_cta_select_and_run(32, 1024, float, uint64_t, float);
#undef instantiate_single_cta_search_kernel
} // namespace raft::neighbors::cagra::detail::single_cta_search
|
c70f426da4d8884ccff4b2ac85ad021220cdf881.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by search_single_cta_00_generate.py
*
* Make changes there and run in this directory:
*
* > python search_single_cta_00_generate.py
*
*/
#include <raft/neighbors/detail/cagra/search_single_cta_kernel-inl.cuh>
namespace raft::neighbors::cagra::detail::single_cta_search {
#define instantiate_single_cta_select_and_run( \
TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T) \
template void select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>( \
raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, \
raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, \
INDEX_T* const topk_indices_ptr, \
DISTANCE_T* const topk_distances_ptr, \
const DATA_T* const queries_ptr, \
const uint32_t num_queries, \
const INDEX_T* dev_seed_ptr, \
uint32_t* const num_executed_iterations, \
uint32_t topk, \
uint32_t num_itopk_candidates, \
uint32_t block_size, \
uint32_t smem_size, \
int64_t hash_bitlen, \
INDEX_T* hashmap_ptr, \
size_t small_hash_bitlen, \
size_t small_hash_reset_interval, \
uint32_t num_random_samplings, \
uint64_t rand_xor_mask, \
uint32_t num_seeds, \
size_t itopk_size, \
size_t search_width, \
size_t min_iterations, \
size_t max_iterations, \
cudaStream_t stream);
instantiate_single_cta_select_and_run(32, 1024, float, uint64_t, float);
#undef instantiate_single_cta_search_kernel
} // namespace raft::neighbors::cagra::detail::single_cta_search
|
fde59e5ca010eff304e431a6eb930d1023be4df7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include "timer.h"
#include "hip/device_functions.h"
long img_size;
int thread_block_size;
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result) {
if (result != hipSuccess) {
cerr << "cuda error: " << hipGetErrorString(result) << endl;
exit(1);
}
}
void usage(){
cout << "Usage: \n"
"Set the 'option' flag to \n"
"\t0: for running naive \n"
"\t2: for 1 block with loop \n"
"\t3: for shared memory histogram with reduction \n"
"Change the 'length' flag to set the length\n"
"Change the 'block_size' flag to set block size\n"
"prun -v -1 -np 1 -native '-l gpu=GTX480' myhistogram <option> <length> <block_size>" << endl;
}
void verifyResults(unsigned int *histogramS, unsigned int *histogram, int hist_size){
    // verify the results
for(int i=0; i<hist_size; i++) {
if (histogram[i]!=histogramS[i]) {
cout << "error in results! Bin " << i << " is "<< histogram[i] << ", but should be " << histogramS[i] << endl;
exit(1);
}
}
cout << "results OK!" << endl;
}
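/* The kernel below uses a grid-stride loop: each thread starts at its global index and
   advances by gridDim.x * blockDim.x, so any grid size (option 1 launches a single block)
   still covers the whole image. Bin updates go straight to global memory with atomicAdd. */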
__global__ void histogramKernelAtomicLoop(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int i, c;
for (i = threadId; i < img_size; i += gridDim.x * blockDim.x) {
c = image[i];
atomicAdd(&histogram[c], 1);
}
}
__global__ void histogramKernelAtomic(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int c;
if (threadID < img_size) {
c = image[threadID];
atomicAdd(&histogram[c], 1);
}
}
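/* The kernel below privatizes the histogram in shared memory: each block zeroes its own
   copy, accumulates into it with shared-memory atomics, and then merges the per-block bins
   into the global histogram with one global atomicAdd per bin. Initialization and the final
   merge are done by threads 0..hist_size-1, so this kernel assumes blockDim.x >= hist_size
   (256 here). */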
__global__ void histogramKernelShared(unsigned char* image, long img_size, unsigned int* histogram, int hist_size){
extern __shared__ int local_histo[];
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int c;
if (threadIdx.x < hist_size){
local_histo[threadIdx.x] = 0;
}
__syncthreads();
    // No early return for out-of-range threads: every thread must reach the barrier below,
    // and threads with threadIdx.x < hist_size still have to flush the shared bins to
    // global memory even in a partially filled last block.
    if (threadID < img_size) {
        c = image[threadID];
        atomicAdd(&local_histo[c], 1);
    }
__syncthreads();
if (threadIdx.x < hist_size){
atomicAdd(&histogram[threadIdx.x], local_histo[threadIdx.x]);
}
}
void histogramCuda(unsigned char* image, long img_size, unsigned int* histogram, int hist_size, int option) {
hipError_t err;
// allocate the vectors on the GPU
unsigned char* deviceImage = NULL;
checkCudaCall(hipMalloc((void **) &deviceImage, img_size * sizeof(unsigned char)));
if (deviceImage == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
unsigned int* deviceHisto = NULL;
checkCudaCall(hipMalloc((void **) &deviceHisto, hist_size * sizeof(unsigned int)));
if (deviceHisto == NULL) {
checkCudaCall(hipFree(deviceImage));
cout << "could not allocate memory!" << endl;
return;
}
err = hipMemset(deviceHisto, 0, hist_size * sizeof(unsigned int));
if (err != hipSuccess) { fprintf(stderr, "Error in hipMemset output: %s\n", hipGetErrorString( err )); }
timer kernelTime1 = timer("kernelTime1");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(hipMemcpy(deviceImage, image, img_size*sizeof(unsigned char), hipMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
if (option == 0) {
cout << "\n----------------------Running 1 thread per pixel----------------------" << endl;
hipLaunchKernelGGL(( histogramKernelAtomic) , dim3((img_size/thread_block_size)+1), dim3(thread_block_size) , 0, 0, deviceImage, img_size, deviceHisto, hist_size);
} else if (option == 1) {
cout << "\n----------------------Running looping inside block----------------------" << endl;
hipLaunchKernelGGL(( histogramKernelAtomicLoop) , dim3(1), dim3(thread_block_size) , 0, 0, deviceImage, img_size, deviceHisto, hist_size);
} else if (option == 2){
cout << "\n----------------------Running with shared memory----------------------" << endl;
hipLaunchKernelGGL(( histogramKernelShared) , dim3((img_size/thread_block_size)+1), dim3(thread_block_size), hist_size*sizeof(int) , 0, deviceImage, img_size, deviceHisto, hist_size);
} else {
cout << "\nInvalid option: " << option << endl;
}
hipDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(hipMemcpy(histogram, deviceHisto, hist_size * sizeof(unsigned int), hipMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(hipFree(deviceImage));
checkCudaCall(hipFree(deviceHisto));
cout << "histogram (kernel): \t\t" << kernelTime1 << endl;
cout << "histogram (memory): \t\t" << memoryTime << endl;
}
void histogramSeq(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
int i;
timer sequentialTime = timer("Sequential");
for (i=0; i<hist_size; i++) histogram[i]=0;
sequentialTime.start();
for (i=0; i<img_size; i++) {
histogram[image[i]]++;
}
sequentialTime.stop();
cout << "histogram (sequential): \t\t" << sequentialTime << endl;
}
int main(int argc, char* argv[]) {
int hist_size = 256;
int option = 0;
if (argc < 4){
usage();
exit(-1);
}
option = atoi(argv[1]);
img_size = atoi(argv[2]);
thread_block_size = atoi(argv[3]);
unsigned char *image = (unsigned char *)malloc(img_size * sizeof(unsigned char));
unsigned int *histogramS = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
unsigned int *histogram = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
// initialize the vectors.
for(long i=0; i<img_size; i++) {
//image[i] = (unsigned char) (i % hist_size);
image[i] = (unsigned char) i % hist_size;
}
cout << "Compute the histogram of a gray image with "
<< img_size << " pixels using block size " << thread_block_size << endl;
cout << "\n----------------------Running sequential----------------------" << endl;
histogramSeq(image, img_size, histogramS, hist_size);
histogramCuda(image, img_size, histogram, hist_size, option);
verifyResults(histogramS, histogram, hist_size);
free(image);
free(histogram);
free(histogramS);
return 0;
}
|
fde59e5ca010eff304e431a6eb930d1023be4df7.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include "timer.h"
#include "device_functions.h"
long img_size;
int thread_block_size;
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
if (result != cudaSuccess) {
cerr << "cuda error: " << cudaGetErrorString(result) << endl;
exit(1);
}
}
void usage(){
cout << "Usage: \n"
"Set the 'option' flag to \n"
"\t0: for running naive \n"
"\t2: for 1 block with loop \n"
"\t3: for shared memory histogram with reduction \n"
"Change the 'length' flag to set the length\n"
"Change the 'block_size' flag to set block size\n"
"prun -v -1 -np 1 -native '-l gpu=GTX480' myhistogram <option> <length> <block_size>" << endl;
}
void verifyResults(unsigned int *histogramS, unsigned int *histogram, int hist_size){
    // verify the results
for(int i=0; i<hist_size; i++) {
if (histogram[i]!=histogramS[i]) {
cout << "error in results! Bin " << i << " is "<< histogram[i] << ", but should be " << histogramS[i] << endl;
exit(1);
}
}
cout << "results OK!" << endl;
}
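/* Grid-stride loop kernel: each thread strides by gridDim.x * blockDim.x over the image, so
   even the single-block launch used for option 1 processes every pixel; bins are updated
   with global atomicAdd. */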
__global__ void histogramKernelAtomicLoop(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int i, c;
for (i = threadId; i < img_size; i += gridDim.x * blockDim.x) {
c = image[i];
atomicAdd(&histogram[c], 1);
}
}
__global__ void histogramKernelAtomic(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int c;
if (threadID < img_size) {
c = image[threadID];
atomicAdd(&histogram[c], 1);
}
}
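/* Shared-memory privatized histogram: each block zeroes a local copy, accumulates into it
   with shared-memory atomics, and finally merges it into the global histogram (one global
   atomicAdd per bin). Threads 0..hist_size-1 handle the init and merge, so the kernel
   assumes blockDim.x >= hist_size. */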
__global__ void histogramKernelShared(unsigned char* image, long img_size, unsigned int* histogram, int hist_size){
extern __shared__ int local_histo[];
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int c;
if (threadIdx.x < hist_size){
local_histo[threadIdx.x] = 0;
}
__syncthreads();
    // No early return for out-of-range threads: every thread must reach the barrier below,
    // and threads with threadIdx.x < hist_size still have to flush the shared bins to
    // global memory even in a partially filled last block.
    if (threadID < img_size) {
        c = image[threadID];
        atomicAdd(&local_histo[c], 1);
    }
__syncthreads();
if (threadIdx.x < hist_size){
atomicAdd(&histogram[threadIdx.x], local_histo[threadIdx.x]);
}
}
void histogramCuda(unsigned char* image, long img_size, unsigned int* histogram, int hist_size, int option) {
cudaError_t err;
// allocate the vectors on the GPU
unsigned char* deviceImage = NULL;
checkCudaCall(cudaMalloc((void **) &deviceImage, img_size * sizeof(unsigned char)));
if (deviceImage == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
unsigned int* deviceHisto = NULL;
checkCudaCall(cudaMalloc((void **) &deviceHisto, hist_size * sizeof(unsigned int)));
if (deviceHisto == NULL) {
checkCudaCall(cudaFree(deviceImage));
cout << "could not allocate memory!" << endl;
return;
}
err = cudaMemset(deviceHisto, 0, hist_size * sizeof(unsigned int));
if (err != cudaSuccess) { fprintf(stderr, "Error in cudaMemset output: %s\n", cudaGetErrorString( err )); }
timer kernelTime1 = timer("kernelTime1");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(cudaMemcpy(deviceImage, image, img_size*sizeof(unsigned char), cudaMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
if (option == 0) {
cout << "\n----------------------Running 1 thread per pixel----------------------" << endl;
histogramKernelAtomic <<< (img_size/thread_block_size)+1, thread_block_size >>>(deviceImage, img_size, deviceHisto, hist_size);
} else if (option == 1) {
cout << "\n----------------------Running looping inside block----------------------" << endl;
histogramKernelAtomicLoop <<< 1, thread_block_size >>> (deviceImage, img_size, deviceHisto, hist_size);
} else if (option == 2){
cout << "\n----------------------Running with shared memory----------------------" << endl;
histogramKernelShared <<< (img_size/thread_block_size)+1, thread_block_size, hist_size*sizeof(int) >>> (deviceImage, img_size, deviceHisto, hist_size);
} else {
cout << "\nInvalid option: " << option << endl;
}
cudaDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(cudaMemcpy(histogram, deviceHisto, hist_size * sizeof(unsigned int), cudaMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(cudaFree(deviceImage));
checkCudaCall(cudaFree(deviceHisto));
cout << "histogram (kernel): \t\t" << kernelTime1 << endl;
cout << "histogram (memory): \t\t" << memoryTime << endl;
}
void histogramSeq(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
int i;
timer sequentialTime = timer("Sequential");
for (i=0; i<hist_size; i++) histogram[i]=0;
sequentialTime.start();
for (i=0; i<img_size; i++) {
histogram[image[i]]++;
}
sequentialTime.stop();
cout << "histogram (sequential): \t\t" << sequentialTime << endl;
}
int main(int argc, char* argv[]) {
int hist_size = 256;
int option = 0;
if (argc < 4){
usage();
exit(-1);
}
option = atoi(argv[1]);
img_size = atoi(argv[2]);
thread_block_size = atoi(argv[3]);
unsigned char *image = (unsigned char *)malloc(img_size * sizeof(unsigned char));
unsigned int *histogramS = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
unsigned int *histogram = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
// initialize the vectors.
for(long i=0; i<img_size; i++) {
//image[i] = (unsigned char) (i % hist_size);
image[i] = (unsigned char) i % hist_size;
}
cout << "Compute the histogram of a gray image with "
<< img_size << " pixels using block size " << thread_block_size << endl;
cout << "\n----------------------Running sequential----------------------" << endl;
histogramSeq(image, img_size, histogramS, hist_size);
histogramCuda(image, img_size, histogram, hist_size, option);
verifyResults(histogramS, histogram, hist_size);
free(image);
free(histogram);
free(histogramS);
return 0;
}
|
ccdcf46b072dfd28c61649f1ab52bd98caa82a11.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 1024 * 1024
#define threads_per_block 512
__global__ void GPU_big_dot(float *a, float *b, float *c, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
c[index] = a[index] * b[index];
}
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
long long stop_timer(long long start_time, const char *name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
printf("%s: %.5f sec\n", name,
((float)(end_time - start_time)) / (1000 * 1000));
return end_time - start_time;
}
float *CPU_big_dot(float *A, float *B, int n) {
static float cpuResult = 0.0;
for (int i = 0; i < n; i++) {
// printf("\n a * b = %f\t\n", A[i] * B[i]);
cpuResult += A[i] * B[i];
}
// printf("inside cpu function's sum = %f\t\n", cpuResult);
return &cpuResult;
}
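/* Note: GPU_big_dot below only computes the elementwise products on the device; the final
   reduction happens on the host after copying all N partial products back. A device-side
   reduction would avoid that copy, e.g. (a sketch only, not used here):
       float dot = thrust::reduce(thrust::device, d_result, d_result + N, 0.0f);
   which assumes <thrust/reduce.h> and <thrust/execution_policy.h> are included. */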
float *GPU_big_dot(float *vector1, float *vector2, int vector_size) {
float *d_v1, *d_v2, *d_result, *GPUResult;
static float gpuSum = 0.0;
// allocate memory space for device memory
hipMalloc((void **)&d_v1, vector_size);
hipMalloc((void **)&d_v2, vector_size);
hipMalloc((void **)&d_result, vector_size);
GPUResult = (float *)malloc(vector_size);
// copy vectors from host to device
long long GPU_start_time = start_timer();
hipMemcpy(d_v1, vector1, vector_size, hipMemcpyHostToDevice);
hipMemcpy(d_v2, vector2, vector_size, hipMemcpyHostToDevice);
long long GPU_total_time = stop_timer(
GPU_start_time,
(char *)"Memory allocation and data transfer from CPU to GPU time ");
  // launch the GPU_big_dot() kernel
  GPU_start_time = start_timer();
  hipLaunchKernelGGL(( GPU_big_dot), dim3((N + threads_per_block - 1) / threads_per_block),
                 dim3(threads_per_block), 0, 0, d_v1, d_v2, d_result, N);
  // kernel launches are asynchronous, so wait for completion before stopping the timer
  hipDeviceSynchronize();
  GPU_total_time = stop_timer(GPU_start_time, (char *)"Kernel execution time");
// copy result back to host
GPU_start_time = start_timer();
hipMemcpy(GPUResult, d_result, vector_size, hipMemcpyDeviceToHost);
GPU_total_time =
stop_timer(GPU_start_time, (char *)"Data transfer from GPU to CPU time");
// cleanup
hipFree(d_v1);
hipFree(d_v2);
hipFree(d_result);
for (int i = 0; i < N; i++) {
gpuSum += GPUResult[i];
// printf("mutlix %f\t\n", GPUResult[i]);
}
return &gpuSum;
// printf("gpuSum in function %f\t\n", gpuSum);
}
void random_vecotr_init(float *vector) {
for (int i = 0; i < N; i++) {
vector[i] = (float)rand() / RAND_MAX;
}
}
void print_vector(float *vector, char *name) {
printf("%s = \n", name);
for (int i = 0; i < N; i++) {
printf("%f\t\n", vector[i]);
}
}
int main(int argc, char *argv[]) {
float *vector1, *vector2, *finalCPUResult, *finalGPUResult;
int vector_size = sizeof(float) * N;
// allocate memory space for host memory
vector1 = (float *)malloc(vector_size);
vector2 = (float *)malloc(vector_size);
// init two vectors with random float numbers
srand(time(NULL));
random_vecotr_init(vector1);
random_vecotr_init(vector2);
// print_vector(vector1, (char *)"vector1");
// print_vector(vector2, (char *)"vector2");
// run on cpu
long long cpu_start_time = start_timer();
finalCPUResult = CPU_big_dot(vector1, vector2, N);
long long cpu_total_time = stop_timer(
cpu_start_time, (char *)"Total computation time for CPU_big_dot()");
// run on gpu
long long GPU_total_start_time = start_timer();
finalGPUResult = GPU_big_dot(vector1, vector2, vector_size);
long long GPU_total_end_time = stop_timer(
GPU_total_start_time, (char *)"Total computation time for GPU_big_dot()");
// convert to seconds
float cpu_total_time_sec = (float)cpu_total_time / (1000 * 1000);
float GPU_total_end_time_sec = (float)GPU_total_end_time / (1000 * 1000);
float speedup = cpu_total_time_sec / GPU_total_end_time_sec;
printf("speed up = %.5f\t\n", speedup);
float compareResult = *finalCPUResult - *finalGPUResult;
if (fabs(compareResult) <= 0.000001) {
printf("CPU computing result and GPU computing result are the same, the "
"results are correct, the difference is %f\n",
compareResult);
} else {
printf("CPU computing result and GPU computing result are NOT the same, "
"the results are NOT correct, the difference is %f\n",
compareResult);
}
printf("CPU computation result in float format = %f\t\n", *finalCPUResult);
printf("CPU computation result in scientific notation= %e\t\n",
*finalCPUResult);
printf("GPU computation result in float format = %f\t\n", *finalGPUResult);
printf("GPU computation result in scientific notation = %e\t\n",
*finalGPUResult);
return 0;
}
|
ccdcf46b072dfd28c61649f1ab52bd98caa82a11.cu
|
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 1024 * 1024
#define threads_per_block 512
__global__ void GPU_big_dot(float *a, float *b, float *c, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
c[index] = a[index] * b[index];
}
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
long long stop_timer(long long start_time, const char *name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
printf("%s: %.5f sec\n", name,
((float)(end_time - start_time)) / (1000 * 1000));
return end_time - start_time;
}
float *CPU_big_dot(float *A, float *B, int n) {
static float cpuResult = 0.0;
for (int i = 0; i < n; i++) {
// printf("\n a * b = %f\t\n", A[i] * B[i]);
cpuResult += A[i] * B[i];
}
// printf("inside cpu function's sum = %f\t\n", cpuResult);
return &cpuResult;
}
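/* GPU_big_dot below leaves the final reduction to the host: the kernel only writes the
   elementwise products, all N of them are copied back, and the loop at the end sums them.
   A device-side reduction (for instance thrust::reduce over d_result; a sketch rather than
   what this file does) would avoid transferring the whole product vector. */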
float *GPU_big_dot(float *vector1, float *vector2, int vector_size) {
float *d_v1, *d_v2, *d_result, *GPUResult;
static float gpuSum = 0.0;
// allocate memory space for device memory
cudaMalloc((void **)&d_v1, vector_size);
cudaMalloc((void **)&d_v2, vector_size);
cudaMalloc((void **)&d_result, vector_size);
GPUResult = (float *)malloc(vector_size);
// copy vectors from host to device
long long GPU_start_time = start_timer();
cudaMemcpy(d_v1, vector1, vector_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_v2, vector2, vector_size, cudaMemcpyHostToDevice);
long long GPU_total_time = stop_timer(
GPU_start_time,
(char *)"Memory allocation and data transfer from CPU to GPU time ");
  // launch the GPU_big_dot() kernel
  GPU_start_time = start_timer();
  GPU_big_dot<<<(N + threads_per_block - 1) / threads_per_block,
                threads_per_block>>>(d_v1, d_v2, d_result, N);
  // kernel launches are asynchronous, so wait for completion before stopping the timer
  cudaDeviceSynchronize();
  GPU_total_time = stop_timer(GPU_start_time, (char *)"Kernel execution time");
// copy result back to host
GPU_start_time = start_timer();
cudaMemcpy(GPUResult, d_result, vector_size, cudaMemcpyDeviceToHost);
GPU_total_time =
stop_timer(GPU_start_time, (char *)"Data transfer from GPU to CPU time");
// cleanup
cudaFree(d_v1);
cudaFree(d_v2);
cudaFree(d_result);
for (int i = 0; i < N; i++) {
gpuSum += GPUResult[i];
// printf("mutlix %f\t\n", GPUResult[i]);
}
return &gpuSum;
// printf("gpuSum in function %f\t\n", gpuSum);
}
void random_vecotr_init(float *vector) {
for (int i = 0; i < N; i++) {
vector[i] = (float)rand() / RAND_MAX;
}
}
void print_vector(float *vector, char *name) {
printf("%s = \n", name);
for (int i = 0; i < N; i++) {
printf("%f\t\n", vector[i]);
}
}
int main(int argc, char *argv[]) {
float *vector1, *vector2, *finalCPUResult, *finalGPUResult;
int vector_size = sizeof(float) * N;
// allocate memory space for host memory
vector1 = (float *)malloc(vector_size);
vector2 = (float *)malloc(vector_size);
// init two vectors with random float numbers
srand(time(NULL));
random_vecotr_init(vector1);
random_vecotr_init(vector2);
// print_vector(vector1, (char *)"vector1");
// print_vector(vector2, (char *)"vector2");
// run on cpu
long long cpu_start_time = start_timer();
finalCPUResult = CPU_big_dot(vector1, vector2, N);
long long cpu_total_time = stop_timer(
cpu_start_time, (char *)"Total computation time for CPU_big_dot()");
// run on gpu
long long GPU_total_start_time = start_timer();
finalGPUResult = GPU_big_dot(vector1, vector2, vector_size);
long long GPU_total_end_time = stop_timer(
GPU_total_start_time, (char *)"Total computation time for GPU_big_dot()");
// convert to seconds
float cpu_total_time_sec = (float)cpu_total_time / (1000 * 1000);
float GPU_total_end_time_sec = (float)GPU_total_end_time / (1000 * 1000);
float speedup = cpu_total_time_sec / GPU_total_end_time_sec;
printf("speed up = %.5f\t\n", speedup);
float compareResult = *finalCPUResult - *finalGPUResult;
if (fabs(compareResult) <= 0.000001) {
printf("CPU computing result and GPU computing result are the same, the "
"results are correct, the difference is %f\n",
compareResult);
} else {
printf("CPU computing result and GPU computing result are NOT the same, "
"the results are NOT correct, the difference is %f\n",
compareResult);
}
printf("CPU computation result in float format = %f\t\n", *finalCPUResult);
printf("CPU computation result in scientific notation= %e\t\n",
*finalCPUResult);
printf("GPU computation result in float format = %f\t\n", *finalGPUResult);
printf("GPU computation result in scientific notation = %e\t\n",
*finalGPUResult);
return 0;
}
|
9704c29ebda06ce3091cb81d2abeff55c207fa1d.hip
|
// !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA functions for ray-voxel intersection based projection
*
 * This file has the necessary functions to perform the X-ray CBCT projection
 * operation given a geometry, angles and an image. It uses the so-called
* Jacobs algorithm to compute efficiently the length of the x-rays over
* voxel space.
*
* CODE by Ander Biguri
*
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "Siddon_projection.hpp"
//#include "mex.h"
#include <math.h>
#include <stdio.h>
#include "projection.hpp"
// if (__err != hipSuccess) { \
// printf("%s \n", msg);\
// printf("%s \n", hipGetErrorString(__err));\
// } \
// TODO: Error logging
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
} while (0)
// mexPrintf("%s \n",msg);\
// mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\
// Declare the texture reference.
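// Note: this is the legacy texture-reference API (kept as-is by hipify); newer CUDA/HIP
// code would typically use texture objects instead.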
texture<float, hipTextureType3D , hipReadModeElementType> tex;
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
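/* One thread per detector pixel: x indexes the detector U direction and y the V direction,
   and results are stored with idx = x * geo.nDetecV + y (V varies fastest). Each thread
   builds the ray from the source to its pixel and walks it intersection by intersection,
   accumulating intersection length times the voxel value fetched from the 3D texture. */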
__global__ void kernelPixelDetector( Geometry geo,
float* detector,
Point3D source ,
Point3D deltaU,
Point3D deltaV,
Point3D uvOrigin){
// size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long idx = x * geo.nDetecV + y;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV))
return;
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
Point3D pixel1D;
pixel1D.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
pixel1D.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
pixel1D.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
///////
// Siddon's ray-voxel intersection, optimized as in doi=10.1.1.55.7516
//////
// Also called Jacobs algorithms
Point3D ray;
// vector of Xray
ray.x=pixel1D.x-source.x;
ray.y=pixel1D.y-source.y;
ray.z=pixel1D.z-source.z;
    // These variables are omitted because
// bx,by,bz ={0,0,0}
// dx,dy,dz ={1,1,1}
// compute parameter values for x-ray parametric equation. eq(3-10)
float axm,aym,azm;
float axM,ayM,azM;
// In the paper Nx= number of X planes-> Nvoxel+1
axm=min(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x);
aym=min(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y);
azm=min(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z);
axM=max(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x);
ayM=max(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y);
azM=max(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z);
float am=max(max(axm,aym),azm);
float aM=min(min(axM,ayM),azM);
    // the ray intersects the voxel space only if am<aM; otherwise write 0 and stop
    if (am>=aM){
        detector[idx]=0;
        return;
    }
// Compute max/min image INDEX for intersection eq(11-19)
// Discussion about ternary operator in CUDA: https://stackoverflow.com/questions/7104384/in-cuda-why-is-a-b010-more-efficient-than-an-if-else-version
float imin,imax,jmin,jmax,kmin,kmax;
// for X
if( source.x<pixel1D.x){
imin=(am==axm)? 1 : ceil (source.x+am*ray.x);
imax=(aM==axM)? geo.nVoxelX : floor(source.x+aM*ray.x);
}else{
imax=(am==axm)? geo.nVoxelX-1 : floor(source.x+am*ray.x);
imin=(aM==axM)? 0 : ceil (source.x+aM*ray.x);
}
// for Y
if( source.y<pixel1D.y){
jmin=(am==aym)? 1 : ceil (source.y+am*ray.y);
jmax=(aM==ayM)? geo.nVoxelY : floor(source.y+aM*ray.y);
}else{
jmax=(am==aym)? geo.nVoxelY-1 : floor(source.y+am*ray.y);
jmin=(aM==ayM)? 0 : ceil (source.y+aM*ray.y);
}
// for Z
if( source.z<pixel1D.z){
kmin=(am==azm)? 1 : ceil (source.z+am*ray.z);
kmax=(aM==azM)? geo.nVoxelZ : floor(source.z+aM*ray.z);
}else{
kmax=(am==azm)? geo.nVoxelZ-1 : floor(source.z+am*ray.z);
kmin=(aM==azM)? 0 : ceil (source.z+aM*ray.z);
}
// get intersection point N1. eq(20-21) [(also eq 9-10)]
float ax,ay,az;
ax=(source.x<pixel1D.x)? (imin-source.x)/ray.x : (imax-source.x)/ray.x;
ay=(source.y<pixel1D.y)? (jmin-source.y)/ray.y : (jmax-source.y)/ray.y;
az=(source.z<pixel1D.z)? (kmin-source.z)/ray.z : (kmax-source.z)/ray.z;
// get index of first intersection. eq (26) and (19)
int i,j,k;
float aminc=min(min(ax,ay),az);
i=(int)floor(source.x+ (aminc+am)/2*ray.x);
j=(int)floor(source.y+ (aminc+am)/2*ray.y);
k=(int)floor(source.z+ (aminc+am)/2*ray.z);
// Initialize
float ac=am;
//eq (28), unit alphas
float axu,ayu,azu;
axu=1/abs(ray.x);
ayu=1/abs(ray.y);
azu=1/abs(ray.z);
// eq(29), direction of update
float iu,ju,ku;
iu=(source.x< pixel1D.x)? 1 : -1;
ju=(source.y< pixel1D.y)? 1 : -1;
ku=(source.z< pixel1D.z)? 1 : -1;
float maxlength=sqrt(ray.x*ray.x*geo.dVoxelX*geo.dVoxelX+ray.y*ray.y*geo.dVoxelY*geo.dVoxelY+ray.z*ray.z*geo.dVoxelZ*geo.dVoxelZ);
float sum=0;
unsigned int Np=(imax-imin+1)+(jmax-jmin+1)+(kmax-kmin+1); // Number of intersections
// Go iterating over the line, intersection by intersection. If double point, no worries, 0 will be computed
for (unsigned int ii=0;ii<Np;ii++){
if (ax==aminc){
sum+=(ax-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);
i=i+iu;
ac=ax;
ax+=axu;
}else if(ay==aminc){
sum+=(ay-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);
j=j+ju;
ac=ay;
ay+=ayu;
}else if(az==aminc){
sum+=(az-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);
k=k+ku;
ac=az;
az+=azu;
}
aminc=min(min(ax,ay),az);
}
detector[idx]=sum*maxlength;
}
int siddon_ray_projection(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){
//DONE, Tesla found
// copy data to CUDA memory
hipArray *d_imagedata = 0;
const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_imagedata, &channelDesc, extent);
cudaCheckErrors("hipMalloc3D error 3D tex");
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_imagedata;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(©Params);
cudaCheckErrors("hipMemcpy3D fail");
// Configure texture options
tex.normalized = false;
    tex.filterMode = hipFilterModePoint; //we don't want interpolation
tex.addressMode[0] = hipAddressModeBorder;
tex.addressMode[1] = hipAddressModeBorder;
tex.addressMode[2] = hipAddressModeBorder;
hipBindTextureToArray(tex, d_imagedata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
//Done! Image put into texture memory.
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float);
float* dProjection;
hipMalloc((void**)&dProjection, num_bytes);
hipMemset(dProjection,0,num_bytes);
cudaCheckErrors("hipMalloc fail");
    bool timekernel=false; // For debugging purposes
hipEvent_t start, stop;
float elapsedTime;
if (timekernel){
hipEventCreate(&start);
hipEventRecord(start,0);
}
Point3D source, deltaU, deltaV, uvOrigin;
// 16x16 gave the best performance empirically
// Funnily that makes it compatible with most GPUs.....
int divU,divV;
divU=16;
divV=16;
dim3 grid((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1);
dim3 block(divU,divV,1);
for (unsigned int i=0;i<nalpha;i++){
geo.alpha=alphas[i];
        // Precompute per-angle constants (source position and detector deltas) for speed
computeDeltas_Siddon(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
hipLaunchKernelGGL(( kernelPixelDetector), dim3(grid),dim3(block), 0, 0, geo,dProjection, source, deltaU, deltaV, uvOrigin);
cudaCheckErrors("Kernel fail");
// copy result to host
hipMemcpy(result[i], dProjection, num_bytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy fail");
}
if (timekernel){
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
// mexPrintf("%f\n" ,elapsedTime); // TODO: Timing logging
}
hipUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
hipFree(dProjection);
hipFreeArray(d_imagedata);
cudaCheckErrors("hipFree d_imagedata fail");
    // there is no need to reset the device, but if one wants to use the NVIDIA Visual Profiler, one should.
//hipDeviceReset();
return 0;
}
/* This code precomputes The location of the source and the Delta U and delta V (in the warped space)
* to compute the locations of the x-rays. While it seems verbose and overly-optimized,
 * it does save about 30% of each of the kernel calls. That's something!
**/
void computeDeltas_Siddon(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO;
S.y=0;
S.z=0;
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD-geo.DSO); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD-geo.DSO); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD-geo.DSO); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
    // Geometric transformations:
    // Now we have the real-world (OXYZ) coordinates of the bottom corner and its two neighbours.
    // The objective is to get the position of the detector in a coordinate system where:
    // 1-units are voxel size (which can differ in each direction)
    // 2-The image has its first voxel at (0,0,0)
    // 3-The image never rotates
    // To do that, we need to compute the "deltas" of the detector, i.e. "by how much
    // (in the new xyz) do the coordinates change when an index is incremented". Several
    // geometric steps are needed:
//1.Roll,pitch,jaw
// The detector can have a small rotation.
// according to
//"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706.
// Only the Z rotation will have a big influence in the image quality when they are small.
// Still all rotations are supported
    // To roll, pitch and yaw, the detector has to be centered in OXYZ.
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
    //Now let's translate the points to where they should be:
P.x=P.x-(geo.DSD-geo.DSO);
Pu0.x=Pu0.x-(geo.DSD-geo.DSO);
Pv0.x=Pv0.x-(geo.DSD-geo.DSO);
//1: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
    //S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z;
Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z;
Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z;
Point3D S2;
S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha);
S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha);
S2.z=S.z;
//2: Offset image (instead of offseting image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2;
S2.x =S2.x+geo.sVoxelX/2; S2.y =S2.y+geo.sVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ;
//mexPrintf("COR: %f \n",geo.COR[i]);
    //5. Apply COR. Wherever everything was, it is now offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S2.x+=CORx; S2.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S2;
}
|
9704c29ebda06ce3091cb81d2abeff55c207fa1d.cu
|
/*-------------------------------------------------------------------------
*
* CUDA functions for ray-voxel intersection based projection
*
 * This file has the necessary functions to perform the X-ray CBCT projection
 * operation given a geometry, angles and an image. It uses the so-called
* Jacobs algorithm to compute efficiently the length of the x-rays over
* voxel space.
*
* CODE by Ander Biguri
*
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "Siddon_projection.hpp"
//#include "mex.h"
#include <math.h>
#include <stdio.h>
#include "projection.hpp"
// if (__err != cudaSuccess) { \
// printf("%s \n", msg);\
// printf("%s \n", cudaGetErrorString(__err));\
// } \
// TODO: Error logging
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
} while (0)
// mexPrintf("%s \n",msg);\
// mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
// Declare the texture reference.
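// Note: texture references like this are a legacy CUDA feature; texture objects are the
// modern replacement, but the reference API is kept here as written.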
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
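/* Thread-to-pixel mapping: x runs over detector U, y over detector V, and the projection
   value is written at idx = x * geo.nDetecV + y. Each thread traces its own source-to-pixel
   ray and sums (intersection length) * (voxel value read from the 3D texture). */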
__global__ void kernelPixelDetector( Geometry geo,
float* detector,
Point3D source ,
Point3D deltaU,
Point3D deltaV,
Point3D uvOrigin){
// size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long idx = x * geo.nDetecV + y;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV))
return;
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
Point3D pixel1D;
pixel1D.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
pixel1D.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
pixel1D.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
///////
// Siddon's ray-voxel intersection, optimized as in doi=10.1.1.55.7516
//////
// Also called Jacobs algorithms
Point3D ray;
// vector of Xray
ray.x=pixel1D.x-source.x;
ray.y=pixel1D.y-source.y;
ray.z=pixel1D.z-source.z;
    // These variables are omitted because
// bx,by,bz ={0,0,0}
// dx,dy,dz ={1,1,1}
// compute parameter values for x-ray parametric equation. eq(3-10)
float axm,aym,azm;
float axM,ayM,azM;
// In the paper Nx= number of X planes-> Nvoxel+1
axm=min(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x);
aym=min(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y);
azm=min(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z);
axM=max(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x);
ayM=max(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y);
azM=max(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z);
float am=max(max(axm,aym),azm);
float aM=min(min(axM,ayM),azM);
    // the ray intersects the voxel space only if am<aM; otherwise write 0 and stop
    if (am>=aM){
        detector[idx]=0;
        return;
    }
// Compute max/min image INDEX for intersection eq(11-19)
// Discussion about ternary operator in CUDA: https://stackoverflow.com/questions/7104384/in-cuda-why-is-a-b010-more-efficient-than-an-if-else-version
float imin,imax,jmin,jmax,kmin,kmax;
// for X
if( source.x<pixel1D.x){
imin=(am==axm)? 1 : ceil (source.x+am*ray.x);
imax=(aM==axM)? geo.nVoxelX : floor(source.x+aM*ray.x);
}else{
imax=(am==axm)? geo.nVoxelX-1 : floor(source.x+am*ray.x);
imin=(aM==axM)? 0 : ceil (source.x+aM*ray.x);
}
// for Y
if( source.y<pixel1D.y){
jmin=(am==aym)? 1 : ceil (source.y+am*ray.y);
jmax=(aM==ayM)? geo.nVoxelY : floor(source.y+aM*ray.y);
}else{
jmax=(am==aym)? geo.nVoxelY-1 : floor(source.y+am*ray.y);
jmin=(aM==ayM)? 0 : ceil (source.y+aM*ray.y);
}
// for Z
if( source.z<pixel1D.z){
kmin=(am==azm)? 1 : ceil (source.z+am*ray.z);
kmax=(aM==azM)? geo.nVoxelZ : floor(source.z+aM*ray.z);
}else{
kmax=(am==azm)? geo.nVoxelZ-1 : floor(source.z+am*ray.z);
kmin=(aM==azM)? 0 : ceil (source.z+aM*ray.z);
}
// get intersection point N1. eq(20-21) [(also eq 9-10)]
float ax,ay,az;
ax=(source.x<pixel1D.x)? (imin-source.x)/ray.x : (imax-source.x)/ray.x;
ay=(source.y<pixel1D.y)? (jmin-source.y)/ray.y : (jmax-source.y)/ray.y;
az=(source.z<pixel1D.z)? (kmin-source.z)/ray.z : (kmax-source.z)/ray.z;
// get index of first intersection. eq (26) and (19)
int i,j,k;
float aminc=min(min(ax,ay),az);
i=(int)floor(source.x+ (aminc+am)/2*ray.x);
j=(int)floor(source.y+ (aminc+am)/2*ray.y);
k=(int)floor(source.z+ (aminc+am)/2*ray.z);
// Initialize
float ac=am;
//eq (28), unit alphas
float axu,ayu,azu;
axu=1/abs(ray.x);
ayu=1/abs(ray.y);
azu=1/abs(ray.z);
// eq(29), direction of update
float iu,ju,ku;
iu=(source.x< pixel1D.x)? 1 : -1;
ju=(source.y< pixel1D.y)? 1 : -1;
ku=(source.z< pixel1D.z)? 1 : -1;
float maxlength=sqrt(ray.x*ray.x*geo.dVoxelX*geo.dVoxelX+ray.y*ray.y*geo.dVoxelY*geo.dVoxelY+ray.z*ray.z*geo.dVoxelZ*geo.dVoxelZ);
float sum=0;
unsigned int Np=(imax-imin+1)+(jmax-jmin+1)+(kmax-kmin+1); // Number of intersections
// Go iterating over the line, intersection by intersection. If double point, no worries, 0 will be computed
for (unsigned int ii=0;ii<Np;ii++){
if (ax==aminc){
sum+=(ax-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);
i=i+iu;
ac=ax;
ax+=axu;
}else if(ay==aminc){
sum+=(ay-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);
j=j+ju;
ac=ay;
ay+=ayu;
}else if(az==aminc){
sum+=(az-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);
k=k+ku;
ac=az;
az+=azu;
}
aminc=min(min(ax,ay),az);
}
detector[idx]=sum*maxlength;
}
int siddon_ray_projection(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){
//DONE, Tesla found
// copy data to CUDA memory
cudaArray *d_imagedata = 0;
const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_imagedata, &channelDesc, extent);
cudaCheckErrors("cudaMalloc3D error 3D tex");
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_imagedata;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(©Params);
cudaCheckErrors("cudaMemcpy3D fail");
// Configure texture options
tex.normalized = false;
    tex.filterMode = cudaFilterModePoint; //we don't want interpolation
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
cudaBindTextureToArray(tex, d_imagedata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
//Done! Image put into texture memory.
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float);
float* dProjection;
cudaMalloc((void**)&dProjection, num_bytes);
cudaMemset(dProjection,0,num_bytes);
cudaCheckErrors("cudaMalloc fail");
    bool timekernel=false; // For debugging purposes
cudaEvent_t start, stop;
float elapsedTime;
if (timekernel){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
Point3D source, deltaU, deltaV, uvOrigin;
// 16x16 gave the best performance empirically
// Funnily that makes it compatible with most GPUs.....
int divU,divV;
divU=16;
divV=16;
dim3 grid((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1);
dim3 block(divU,divV,1);
for (unsigned int i=0;i<nalpha;i++){
geo.alpha=alphas[i];
        // Precompute per-angle constants (source position and detector deltas) for speed
computeDeltas_Siddon(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
kernelPixelDetector<<<grid,block>>>(geo,dProjection, source, deltaU, deltaV, uvOrigin);
cudaCheckErrors("Kernel fail");
// copy result to host
cudaMemcpy(result[i], dProjection, num_bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy fail");
}
if (timekernel){
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
// mexPrintf("%f\n" ,elapsedTime); // TODO: Timing logging
}
cudaUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
cudaFree(dProjection);
cudaFreeArray(d_imagedata);
cudaCheckErrors("cudaFree d_imagedata fail");
    // there is no need to reset the device, but if one wants to use the NVIDIA Visual Profiler, one should.
//cudaDeviceReset();
return 0;
}
/* This code precomputes The location of the source and the Delta U and delta V (in the warped space)
* to compute the locations of the x-rays. While it seems verbose and overly-optimized,
 * it does save about 30% of each of the kernel calls. That's something!
**/
void computeDeltas_Siddon(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO;
S.y=0;
S.z=0;
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD-geo.DSO); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD-geo.DSO); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD-geo.DSO); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
    // Geometric transformations:
    // Now we have the real-world (OXYZ) coordinates of the bottom corner and its two neighbours.
    // The objective is to get the position of the detector in a coordinate system where:
    // 1-units are voxel size (which can differ in each direction)
    // 2-The image has its first voxel at (0,0,0)
    // 3-The image never rotates
    // To do that, we need to compute the "deltas" of the detector, i.e. "by how much
    // (in the new xyz) do the coordinates change when an index is incremented". Several
    // geometric steps are needed:
//1.Roll,pitch,jaw
// The detector can have a small rotation.
// according to
//"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706.
// Only the Z rotation will have a big influence in the image quality when they are small.
// Still all rotations are supported
    // To roll, pitch and yaw, the detector has to be centered in OXYZ.
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
    //Now let's translate the points to where they should be:
P.x=P.x-(geo.DSD-geo.DSO);
Pu0.x=Pu0.x-(geo.DSD-geo.DSO);
Pv0.x=Pv0.x-(geo.DSD-geo.DSO);
//1: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
    //S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z;
Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z;
Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z;
Point3D S2;
S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha);
S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha);
S2.z=S.z;
//2: Offset image (instead of offseting image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2;
S2.x =S2.x+geo.sVoxelX/2; S2.y =S2.y+geo.sVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ;
//mexPrintf("COR: %f \n",geo.COR[i]);
    //5. Apply COR. Wherever everything was, it is now offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S2.x+=CORx; S2.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S2;
}
|
0867963607bd82b6b9777a0eb91ca73a4472cf52.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/hip/HIPApplyUtils.cuh"
#include "ATen/hip/HIPContext.h"
#include "ATen/NativeFunctions.h"
#include "ATen/TensorUtils.h"
#include "ATen/Utils.h"
#include "c10/util/Exception.h"
#include <THH/THHGeneral.h>
#include "THH/THHNumerics.cuh"
#include "THH/THHDeviceUtils.cuh"
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
__host__ __device__ __forceinline__ int imin(int a, int b) {
return a > b ? b : a;
}
__host__ __device__ __forceinline__ int imax(int a, int b) {
return a > b ? a : b;
}
namespace {
template <typename scalar_t>
__global__ void replication_pad_forward_kernel1d(
PackedTensorAccessor64<scalar_t, 3> input,
PackedTensorAccessor64<scalar_t, 3> output,
int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= output.size(2)) {
return;
}
int outputPointX = outputPointId % output.size(2);
int iStartX = imax(0, -padL);
int oStartX = imax(0, padL);
int inputPointX = imin(imax(padL, outputPointX), input.size(2) + padL - 1) - oStartX + iStartX;
scalar_t valueToCopy = input[batch][plane][inputPointX];
output[batch][plane][outputPointX] = valueToCopy;
}
template <typename scalar_t>
__global__ void replication_pad_backward_kernel(
PackedTensorAccessor64<scalar_t, 3> gradInput,
PackedTensorAccessor64<scalar_t, 3> gradOutput,
int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= gradOutput.size(2)) {
return;
}
int outputPointX = outputPointId % gradOutput.size(2);
int iStartX = imax(0, -padL);
int oStartX = imax(0, padL);
int inputPointX = imin(imax(padL, outputPointX), gradInput.size(2) + padL - 1) - oStartX + iStartX;
scalar_t valueToCopy = gradOutput[batch][plane][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointX], valueToCopy);
}
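// The backward kernels accumulate with atomicAdd because replication padding maps several
// output positions at the borders onto the same (clamped) input position, so their
// gradients have to be summed into one gradInput element.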
template <typename scalar_t>
__global__ void replication_pad_forward_kernel2d(
PackedTensorAccessor64<scalar_t, 4> input,
PackedTensorAccessor64<scalar_t, 4> output,
int padT, int padB, int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= output.size(2) * output.size(3)) {
return;
}
int outputPointX = outputPointId % output.size(3);
int outputPointY = outputPointId / output.size(3);
int iStartX = imax(0, -padL);
int iStartY = imax(0, -padT);
int oStartX = imax(0, padL);
int oStartY = imax(0, padT);
int inputPointX = imin(imax(padL, outputPointX), input.size(3) + padL - 1) - oStartX + iStartX;
int inputPointY = imin(imax(padT, outputPointY), input.size(2) + padT - 1) - oStartY + iStartY;
scalar_t valueToCopy = input[batch][plane][inputPointY][inputPointX];
output[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
template <typename scalar_t>
__global__ void replication_pad_backward_kernel(
PackedTensorAccessor64<scalar_t, 4> gradInput,
PackedTensorAccessor64<scalar_t, 4> gradOutput,
int padT, int padB, int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= gradOutput.size(2) * gradOutput.size(3)) {
return;
}
int outputPointX = outputPointId % gradOutput.size(3);
int outputPointY = outputPointId / gradOutput.size(3);
int iStartX = imax(0, -padL);
int iStartY = imax(0, -padT);
int oStartX = imax(0, padL);
int oStartY = imax(0, padT);
int inputPointX = imin(imax(padL, outputPointX), gradInput.size(3) + padL - 1) - oStartX + iStartX;
int inputPointY = imin(imax(padT, outputPointY), gradInput.size(2) + padT - 1) - oStartY + iStartY;
scalar_t valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy);
}
template <typename scalar_t>
__global__ void replication_pad_forward_kernel3d(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (output.size(2) * output.size(3) *
output.size(4))) {
return;
}
int outputPointX = outputPointId % output.size(4);
int outputPointY = (outputPointId / output.size(4)) % output.size(3);
int outputPointZ = outputPointId / (output.size(3) * output.size(4));
int iStartX = imax(0, -pleft);
int iStartY = imax(0, -ptop);
int iStartZ = imax(0, -pfront);
int oStartX = imax(0, pleft);
int oStartY = imax(0, ptop);
int oStartZ = imax(0, pfront);
int inputPointX = imin(imax(pleft, outputPointX),
input.size(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = imin(imax(ptop, outputPointY),
input.size(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = imin(imax(pfront, outputPointZ),
input.size(2) + pfront - 1) - oStartZ + iStartZ;
scalar_t valueToCopy =
input[batch][plane][inputPointZ][inputPointY][inputPointX];
output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
template <typename scalar_t>
__global__ void replication_pad_backward_kernel(
PackedTensorAccessor64<scalar_t, 5> gradInput,
PackedTensorAccessor64<scalar_t, 5> gradOutput,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (gradOutput.size(2) * gradOutput.size(3) *
gradOutput.size(4))) {
return;
}
int outputPointX = outputPointId % gradOutput.size(4);
int outputPointY = (outputPointId / gradOutput.size(4)) %
gradOutput.size(3);
int outputPointZ = outputPointId / (gradOutput.size(3) *
gradOutput.size(4));
int iStartX = imax(0, -pleft);
int iStartY = imax(0, -ptop);
int iStartZ = imax(0, -pfront);
int oStartX = imax(0, pleft);
int oStartY = imax(0, ptop);
int oStartZ = imax(0, pfront);
int inputPointX = imin(imax(pleft, outputPointX),
gradInput.size(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = imin(imax(ptop, outputPointY),
gradInput.size(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = imin(imax(pfront, outputPointZ),
gradInput.size(2) + pfront - 1) - oStartZ + iStartZ;
scalar_t valueToCopy =
gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
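// Host-side launchers below share one launch shape: grid.x tiles the output
// plane in chunks of up to 256 threads, grid.y indexes the channel plane and
// grid.z the batch element.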
void replication_pad1d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2");
int padL = paddingSize[0];
int padR = paddingSize[1];
int planeDim = 0;
int dimw = 1;
int numBatch = 1;
int numInputDims = input.ndimension();
TORCH_CHECK(input.numel() > 0 && (numInputDims == 2 || numInputDims == 3),
"2D or 3D (batch mode) tensor expected for input")
if (numInputDims == 3) {
numBatch = input.size(0);
planeDim++;
dimw++;
}
int numPlanes = input.size(planeDim);
int inputW = input.size(dimw);
int outputW = inputW + padL + padR;
TORCH_CHECK(outputW >= 1,
"input (W: ", inputW, ")is too small."
" Calculated output W: ", outputW);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad1d_cuda", [&] {
if (numInputDims == 2) {
output.resize_({numPlanes, outputW});
auto input_ = input.unsqueeze(0);
auto output_ = output.unsqueeze(0);
auto devInput = input_.packed_accessor64<scalar_t, 3>();
auto devOutput = output_.packed_accessor64<scalar_t, 3>();
int outputPlaneSize = devOutput.size(2);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( replication_pad_forward_kernel1d) , dim3(gridSize), dim3(blockSize), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padL, padR);
} else {
output.resize_({numBatch, numPlanes, outputW});
auto devInput = input.packed_accessor64<scalar_t, 3>();
auto devOutput = output.packed_accessor64<scalar_t, 3>();
int outputPlaneSize = devOutput.size(2);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( replication_pad_forward_kernel1d) , dim3(gridSize), dim3(blockSize), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padL, padR);
}
}
);
AT_CUDA_CHECK(hipGetLastError());
}
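// Backward launchers zero gradInput up front because the kernels only
// accumulate into it.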
void replication_pad1d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput),
"output gradient tensor must fit into 32-bit index math");
TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2");
int padL = paddingSize[0];
int padR = paddingSize[1];
int planeDim = 0;
int dimw = 1;
int numInputDims = input.ndimension();
if (numInputDims == 3) {
planeDim++;
dimw++;
}
int iwidth = input.size(dimw);
int owidth = iwidth + padL + padR;
TORCH_CHECK(owidth == gradOutput.size(dimw),
"gradOutput width unexpected. Expected: ", owidth, ", Got: ",
gradOutput.size(dimw));
gradInput.resize_as_(input);
gradInput.zero_();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad1d_backward_cuda", [&] {
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
if (numInputDims == 2) {
gradInput_ = gradInput.unsqueeze(0);
gradOutput_ = gradOutput.unsqueeze(0);
}
auto devGradInput = gradInput_.packed_accessor64<scalar_t, 3>();
auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 3>();
int outputPlaneSize = devGradOutput.size(2);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.size(1),
devGradOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput,
padL, padR);
}
);
AT_CUDA_CHECK(hipGetLastError());
}
void replication_pad2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4");
int padL = paddingSize[0];
int padR = paddingSize[1];
int padT = paddingSize[2];
int padB = paddingSize[3];
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int numInputDims = input.dim();
TORCH_CHECK(input.numel() && (numInputDims == 3 || numInputDims == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input, but got: ",
input)
if (numInputDims == 4) {
numBatch = input.size(0);
planeDim++;
dimh++;
dimw++;
}
int numPlanes = input.size(planeDim);
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = inputH + padT + padB;
int outputW = inputW + padL + padR;
TORCH_CHECK(outputW >= 1 || outputH >= 1,
"input (H: ", inputH, ", W: ", inputW, ") is too small."
" Calculated output H: ", outputH, " W: ", outputW);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad2d_cuda", [&] {
if (numInputDims == 3) {
output.resize_({numPlanes, outputH, outputW});
auto input_ = input.unsqueeze(0);
auto output_ = output.unsqueeze(0);
auto devInput = input_.packed_accessor64<scalar_t, 4>();
auto devOutput = output_.packed_accessor64<scalar_t, 4>();
int outputPlaneSize = devOutput.size(2) * devOutput.size(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( replication_pad_forward_kernel2d) , dim3(gridSize), dim3(blockSize), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
devInput, devOutput, padT, padB, padL, padR);
} else {
output.resize_({numBatch, numPlanes, outputH, outputW});
auto devInput = input.packed_accessor64<scalar_t, 4>();
auto devOutput = output.packed_accessor64<scalar_t, 4>();
int outputPlaneSize = devOutput.size(2) * devOutput.size(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( replication_pad_forward_kernel2d) , dim3(gridSize), dim3(blockSize), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput,
padT, padB, padL, padR);
}
}
);
AT_CUDA_CHECK(hipGetLastError());
}
void replication_pad2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput),
"output gradient tensor must fit into 32-bit index math");
TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4");
int padL = paddingSize[0];
int padR = paddingSize[1];
int padT = paddingSize[2];
int padB = paddingSize[3];
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numInputDims = input.dim();
if (numInputDims == 4) {
planeDim++;
dimh++;
dimw++;
}
int iheight = input.size(dimh);
int iwidth = input.size(dimw);
int oheight = iheight + padT + padB;
int owidth = iwidth + padL + padR;
TORCH_CHECK(owidth == gradOutput.size(dimw),
"gradOutput width unexpected. Expected: ", owidth, ", Got: ",
gradOutput.size(dimw));
TORCH_CHECK(oheight == gradOutput.size(dimh),
"gradOutput height unexpected. Expected: ", oheight, ", Got: ",
gradOutput.size(dimh));
gradInput.resize_as_(input);
gradInput.zero_();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad2d_backward_cuda", [&] {
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
if (numInputDims == 3) {
gradInput_ = gradInput.unsqueeze(0);
gradOutput_ = gradOutput.unsqueeze(0);
}
auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>();
auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>();
int outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.size(1),
devGradOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput,
padT, padB, padL, padR);
}
);
AT_CUDA_CHECK(hipGetLastError());
}
static inline void shapeCheck3d(
const Tensor& input,
int pleft, int pright,
int ptop, int pbottom,
int pfront, int pback) {
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
int numInputDims = input.dim();
TORCH_CHECK(input.numel() && (numInputDims == 4 || numInputDims == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input);
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
if (numInputDims == 5) {
planeDim++;
dimd++;
dimh++;
dimw++;
}
int numPlanes = input.size(planeDim);
int idepth = input.size(dimd);
int iheight = input.size(dimh);
int iwidth = input.size(dimw);
int odepth = idepth + pfront + pback;
int oheight = iheight + ptop + pbottom;
int owidth = iwidth + pleft + pright;
TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1,
"input (D: ", idepth, " H: ", iheight, ", W: ", iwidth,
") is too small."
" Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth);
}
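// Same shape validation as above, plus checks that gradOutput matches the
// padded output dimensions computed from the input and padding sizes.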
static inline void shapeAndGradOutputCheck3d(
const Tensor& input,
const Tensor& gradOutput,
int pleft, int pright,
int ptop, int pbottom,
int pfront, int pback) {
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
int numInputDims = input.dim();
TORCH_CHECK(input.numel() && (numInputDims == 4 || numInputDims == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input);
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
if (numInputDims == 5) {
planeDim++;
dimd++;
dimh++;
dimw++;
}
int numPlanes = input.size(planeDim);
int idepth = input.size(dimd);
int iheight = input.size(dimh);
int iwidth = input.size(dimw);
int odepth = idepth + pfront + pback;
int oheight = iheight + ptop + pbottom;
int owidth = iwidth + pleft + pright;
TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1,
"input (D: ", idepth, " H: ", iheight, ", W: ", iwidth,
") is too small."
" Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth);
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput),
"output gradient tensor must fit into 32-bit index math");
TORCH_CHECK(numPlanes == gradOutput.size(planeDim),
"gradOutput width unexpected. Expected: ", numPlanes, ", Got: ",
gradOutput.size(planeDim));
TORCH_CHECK(owidth == gradOutput.size(dimw),
"gradOutput width unexpected. Expected: ", owidth, ", Got: ",
gradOutput.size(dimw));
TORCH_CHECK(oheight == gradOutput.size(dimh),
"gradOutput height unexpected. Expected: ", oheight, ", Got: ",
gradOutput.size(dimh));
TORCH_CHECK(odepth == gradOutput.size(dimd),
"gradOutput depth unexpected. Expected: ", odepth, ", Got: ",
gradOutput.size(dimd));
}
void replication_pad3d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6");
int pleft = paddingSize[0];
int pright = paddingSize[1];
int ptop = paddingSize[2];
int pbottom = paddingSize[3];
int pfront = paddingSize[4];
int pback = paddingSize[5];
shapeCheck3d(input, pleft, pright, ptop,
pbottom, pfront, pback);
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
int numBatch = 1;
int numInputDims = input.dim();
if (numInputDims == 5) {
numBatch = input.size(0);
planeDim++;
dimd++;
dimh++;
dimw++;
}
int numPlanes = input.size(planeDim);
int inputD = input.size(dimd);
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputD = inputD + pfront + pback;
int outputH = inputH + ptop + pbottom;
int outputW = inputW + pleft + pright;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad3d_cuda", [&] {
if (numInputDims == 4) {
output.resize_({numPlanes, outputD, outputH, outputW});
auto input_ = input.unsqueeze(0);
auto output_ = output.unsqueeze(0);
auto devInput = input_.packed_accessor64<scalar_t, 5>();
auto devOutput = output_.packed_accessor64<scalar_t, 5>();
int outputPlaneSize = devOutput.size(2) * devOutput.size(3) *
devOutput.size(4);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( replication_pad_forward_kernel3d) , dim3(gridSize), dim3(blockSize), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright);
} else {
output.resize_({numBatch, numPlanes, outputD, outputH, outputW});
auto devInput = input.packed_accessor64<scalar_t, 5>();
auto devOutput = output.packed_accessor64<scalar_t, 5>();
int outputPlaneSize = devOutput.size(2) * devOutput.size(3) *
devOutput.size(4);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( replication_pad_forward_kernel3d) , dim3(gridSize), dim3(blockSize), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright);
}
}
);
AT_CUDA_CHECK(hipGetLastError());
}
void replication_pad3d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6");
int pleft = paddingSize[0];
int pright = paddingSize[1];
int ptop = paddingSize[2];
int pbottom = paddingSize[3];
int pfront = paddingSize[4];
int pback = paddingSize[5];
shapeAndGradOutputCheck3d(input, gradOutput, pleft, pright, ptop,
pbottom, pfront, pback);
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
int numInputDims = input.dim();
if (numInputDims == 5) {
planeDim++;
dimd++;
dimh++;
dimw++;
}
gradInput.resize_as_(input);
gradInput.zero_();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad3d_backward_cuda", [&] {
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
if (numInputDims == 4) {
gradInput_ = gradInput.unsqueeze(0);
gradOutput_ = gradOutput.unsqueeze(0);
}
auto devGradInput = gradInput_.packed_accessor64<scalar_t, 5>();
auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 5>();
int outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3) *
devGradOutput.size(4);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.size(1),
devGradOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright);
}
);
AT_CUDA_CHECK(hipGetLastError());
}
} // namespace
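// Public entry points: thin wrappers around the *_out_cuda_template functions
// above; the non-out variants allocate the result tensor first.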
Tensor& replication_pad1d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad1d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor replication_pad1d_cuda(
const Tensor& input,
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad1d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor& replication_pad1d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad1d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
Tensor replication_pad1d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
replication_pad1d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
Tensor& replication_pad2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad2d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor replication_pad2d_cuda(
const Tensor& input,
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad2d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor& replication_pad2d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad2d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
Tensor replication_pad2d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
replication_pad2d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
Tensor& replication_pad3d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad3d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor replication_pad3d_cuda(
const Tensor& input,
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad3d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor& replication_pad3d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad3d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
Tensor replication_pad3d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
replication_pad3d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
} // at::native
} // at
|
0867963607bd82b6b9777a0eb91ca73a4472cf52.cu
|
#include "ATen/ATen.h"
#include "ATen/cuda/CUDAApplyUtils.cuh"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/NativeFunctions.h"
#include "ATen/TensorUtils.h"
#include "ATen/Utils.h"
#include "c10/util/Exception.h"
#include <THC/THCGeneral.h>
#include "THC/THCNumerics.cuh"
#include "THC/THCDeviceUtils.cuh"
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
__host__ __device__ __forceinline__ int imin(int a, int b) {
return a > b ? b : a;
}
__host__ __device__ __forceinline__ int imax(int a, int b) {
return a > b ? a : b;
}
namespace {
template <typename scalar_t>
__global__ void replication_pad_forward_kernel1d(
PackedTensorAccessor64<scalar_t, 3> input,
PackedTensorAccessor64<scalar_t, 3> output,
int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= output.size(2)) {
return;
}
int outputPointX = outputPointId % output.size(2);
int iStartX = imax(0, -padL);
int oStartX = imax(0, padL);
int inputPointX = imin(imax(padL, outputPointX), input.size(2) + padL - 1) - oStartX + iStartX;
scalar_t valueToCopy = input[batch][plane][inputPointX];
output[batch][plane][outputPointX] = valueToCopy;
}
template <typename scalar_t>
__global__ void replication_pad_backward_kernel(
PackedTensorAccessor64<scalar_t, 3> gradInput,
PackedTensorAccessor64<scalar_t, 3> gradOutput,
int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= gradOutput.size(2)) {
return;
}
int outputPointX = outputPointId % gradOutput.size(2);
int iStartX = imax(0, -padL);
int oStartX = imax(0, padL);
int inputPointX = imin(imax(padL, outputPointX), gradInput.size(2) + padL - 1) - oStartX + iStartX;
scalar_t valueToCopy = gradOutput[batch][plane][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointX], valueToCopy);
}
template <typename scalar_t>
__global__ void replication_pad_forward_kernel2d(
PackedTensorAccessor64<scalar_t, 4> input,
PackedTensorAccessor64<scalar_t, 4> output,
int padT, int padB, int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= output.size(2) * output.size(3)) {
return;
}
int outputPointX = outputPointId % output.size(3);
int outputPointY = outputPointId / output.size(3);
int iStartX = imax(0, -padL);
int iStartY = imax(0, -padT);
int oStartX = imax(0, padL);
int oStartY = imax(0, padT);
int inputPointX = imin(imax(padL, outputPointX), input.size(3) + padL - 1) - oStartX + iStartX;
int inputPointY = imin(imax(padT, outputPointY), input.size(2) + padT - 1) - oStartY + iStartY;
scalar_t valueToCopy = input[batch][plane][inputPointY][inputPointX];
output[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
template <typename scalar_t>
__global__ void replication_pad_backward_kernel(
PackedTensorAccessor64<scalar_t, 4> gradInput,
PackedTensorAccessor64<scalar_t, 4> gradOutput,
int padT, int padB, int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= gradOutput.size(2) * gradOutput.size(3)) {
return;
}
int outputPointX = outputPointId % gradOutput.size(3);
int outputPointY = outputPointId / gradOutput.size(3);
int iStartX = imax(0, -padL);
int iStartY = imax(0, -padT);
int oStartX = imax(0, padL);
int oStartY = imax(0, padT);
int inputPointX = imin(imax(padL, outputPointX), gradInput.size(3) + padL - 1) - oStartX + iStartX;
int inputPointY = imin(imax(padT, outputPointY), gradInput.size(2) + padT - 1) - oStartY + iStartY;
scalar_t valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy);
}
template <typename scalar_t>
__global__ void replication_pad_forward_kernel3d(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (output.size(2) * output.size(3) *
output.size(4))) {
return;
}
int outputPointX = outputPointId % output.size(4);
int outputPointY = (outputPointId / output.size(4)) % output.size(3);
int outputPointZ = outputPointId / (output.size(3) * output.size(4));
int iStartX = imax(0, -pleft);
int iStartY = imax(0, -ptop);
int iStartZ = imax(0, -pfront);
int oStartX = imax(0, pleft);
int oStartY = imax(0, ptop);
int oStartZ = imax(0, pfront);
int inputPointX = imin(imax(pleft, outputPointX),
input.size(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = imin(imax(ptop, outputPointY),
input.size(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = imin(imax(pfront, outputPointZ),
input.size(2) + pfront - 1) - oStartZ + iStartZ;
scalar_t valueToCopy =
input[batch][plane][inputPointZ][inputPointY][inputPointX];
output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
template <typename scalar_t>
__global__ void replication_pad_backward_kernel(
PackedTensorAccessor64<scalar_t, 5> gradInput,
PackedTensorAccessor64<scalar_t, 5> gradOutput,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (gradOutput.size(2) * gradOutput.size(3) *
gradOutput.size(4))) {
return;
}
int outputPointX = outputPointId % gradOutput.size(4);
int outputPointY = (outputPointId / gradOutput.size(4)) %
gradOutput.size(3);
int outputPointZ = outputPointId / (gradOutput.size(3) *
gradOutput.size(4));
int iStartX = imax(0, -pleft);
int iStartY = imax(0, -ptop);
int iStartZ = imax(0, -pfront);
int oStartX = imax(0, pleft);
int oStartY = imax(0, ptop);
int oStartZ = imax(0, pfront);
int inputPointX = imin(imax(pleft, outputPointX),
gradInput.size(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = imin(imax(ptop, outputPointY),
gradInput.size(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = imin(imax(pfront, outputPointZ),
gradInput.size(2) + pfront - 1) - oStartZ + iStartZ;
scalar_t valueToCopy =
gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
void replication_pad1d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2");
int padL = paddingSize[0];
int padR = paddingSize[1];
int planeDim = 0;
int dimw = 1;
int numBatch = 1;
int numInputDims = input.ndimension();
TORCH_CHECK(input.numel() > 0 && (numInputDims == 2 || numInputDims == 3),
"2D or 3D (batch mode) tensor expected for input")
if (numInputDims == 3) {
numBatch = input.size(0);
planeDim++;
dimw++;
}
int numPlanes = input.size(planeDim);
int inputW = input.size(dimw);
int outputW = inputW + padL + padR;
TORCH_CHECK(outputW >= 1,
"input (W: ", inputW, ")is too small."
" Calculated output W: ", outputW);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad1d_cuda", [&] {
if (numInputDims == 2) {
output.resize_({numPlanes, outputW});
auto input_ = input.unsqueeze(0);
auto output_ = output.unsqueeze(0);
auto devInput = input_.packed_accessor64<scalar_t, 3>();
auto devOutput = output_.packed_accessor64<scalar_t, 3>();
int outputPlaneSize = devOutput.size(2);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
replication_pad_forward_kernel1d <<<gridSize, blockSize, 0,
at::cuda::getCurrentCUDAStream()>>>(devInput, devOutput, padL, padR);
} else {
output.resize_({numBatch, numPlanes, outputW});
auto devInput = input.packed_accessor64<scalar_t, 3>();
auto devOutput = output.packed_accessor64<scalar_t, 3>();
int outputPlaneSize = devOutput.size(2);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
replication_pad_forward_kernel1d <<<gridSize, blockSize, 0,
at::cuda::getCurrentCUDAStream()>>>(devInput, devOutput, padL, padR);
}
}
);
AT_CUDA_CHECK(cudaGetLastError());
}
void replication_pad1d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput),
"output gradient tensor must fit into 32-bit index math");
TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2");
int padL = paddingSize[0];
int padR = paddingSize[1];
int planeDim = 0;
int dimw = 1;
int numInputDims = input.ndimension();
if (numInputDims == 3) {
planeDim++;
dimw++;
}
int iwidth = input.size(dimw);
int owidth = iwidth + padL + padR;
TORCH_CHECK(owidth == gradOutput.size(dimw),
"gradOutput width unexpected. Expected: ", owidth, ", Got: ",
gradOutput.size(dimw));
gradInput.resize_as_(input);
gradInput.zero_();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad1d_backward_cuda", [&] {
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
if (numInputDims == 2) {
gradInput_ = gradInput.unsqueeze(0);
gradOutput_ = gradOutput.unsqueeze(0);
}
auto devGradInput = gradInput_.packed_accessor64<scalar_t, 3>();
auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 3>();
int outputPlaneSize = devGradOutput.size(2);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.size(1),
devGradOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
replication_pad_backward_kernel <<<gridSize, blockSize, 0,
at::cuda::getCurrentCUDAStream()>>>(devGradInput, devGradOutput,
padL, padR);
}
);
AT_CUDA_CHECK(cudaGetLastError());
}
void replication_pad2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4");
int padL = paddingSize[0];
int padR = paddingSize[1];
int padT = paddingSize[2];
int padB = paddingSize[3];
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int numInputDims = input.dim();
TORCH_CHECK(input.numel() && (numInputDims == 3 || numInputDims == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input, but got: ",
input)
if (numInputDims == 4) {
numBatch = input.size(0);
planeDim++;
dimh++;
dimw++;
}
int numPlanes = input.size(planeDim);
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = inputH + padT + padB;
int outputW = inputW + padL + padR;
TORCH_CHECK(outputW >= 1 || outputH >= 1,
"input (H: ", inputH, ", W: ", inputW, ") is too small."
" Calculated output H: ", outputH, " W: ", outputW);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad2d_cuda", [&] {
if (numInputDims == 3) {
output.resize_({numPlanes, outputH, outputW});
auto input_ = input.unsqueeze(0);
auto output_ = output.unsqueeze(0);
auto devInput = input_.packed_accessor64<scalar_t, 4>();
auto devOutput = output_.packed_accessor64<scalar_t, 4>();
int outputPlaneSize = devOutput.size(2) * devOutput.size(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
replication_pad_forward_kernel2d <<<gridSize, blockSize, 0,
at::cuda::getCurrentCUDAStream()>>>(
devInput, devOutput, padT, padB, padL, padR);
} else {
output.resize_({numBatch, numPlanes, outputH, outputW});
auto devInput = input.packed_accessor64<scalar_t, 4>();
auto devOutput = output.packed_accessor64<scalar_t, 4>();
int outputPlaneSize = devOutput.size(2) * devOutput.size(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
replication_pad_forward_kernel2d <<<gridSize, blockSize, 0,
at::cuda::getCurrentCUDAStream()>>>(devInput, devOutput,
padT, padB, padL, padR);
}
}
);
AT_CUDA_CHECK(cudaGetLastError());
}
void replication_pad2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput),
"output gradient tensor must fit into 32-bit index math");
TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4");
int padL = paddingSize[0];
int padR = paddingSize[1];
int padT = paddingSize[2];
int padB = paddingSize[3];
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numInputDims = input.dim();
if (numInputDims == 4) {
planeDim++;
dimh++;
dimw++;
}
int iheight = input.size(dimh);
int iwidth = input.size(dimw);
int oheight = iheight + padT + padB;
int owidth = iwidth + padL + padR;
TORCH_CHECK(owidth == gradOutput.size(dimw),
"gradOutput width unexpected. Expected: ", owidth, ", Got: ",
gradOutput.size(dimw));
TORCH_CHECK(oheight == gradOutput.size(dimh),
"gradOutput height unexpected. Expected: ", oheight, ", Got: ",
gradOutput.size(dimh));
gradInput.resize_as_(input);
gradInput.zero_();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad2d_backward_cuda", [&] {
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
if (numInputDims == 3) {
gradInput_ = gradInput.unsqueeze(0);
gradOutput_ = gradOutput.unsqueeze(0);
}
auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>();
auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>();
int outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.size(1),
devGradOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
replication_pad_backward_kernel <<<gridSize, blockSize, 0,
at::cuda::getCurrentCUDAStream()>>>(devGradInput, devGradOutput,
padT, padB, padL, padR);
}
);
AT_CUDA_CHECK(cudaGetLastError());
}
static inline void shapeCheck3d(
const Tensor& input,
int pleft, int pright,
int ptop, int pbottom,
int pfront, int pback) {
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
int numInputDims = input.dim();
TORCH_CHECK(input.numel() && (numInputDims == 4 || numInputDims == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input);
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
if (numInputDims == 5) {
planeDim++;
dimd++;
dimh++;
dimw++;
}
int numPlanes = input.size(planeDim);
int idepth = input.size(dimd);
int iheight = input.size(dimh);
int iwidth = input.size(dimw);
int odepth = idepth + pfront + pback;
int oheight = iheight + ptop + pbottom;
int owidth = iwidth + pleft + pright;
TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1,
"input (D: ", idepth, " H: ", iheight, ", W: ", iwidth,
") is too small."
" Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth);
}
static inline void shapeAndGradOutputCheck3d(
const Tensor& input,
const Tensor& gradOutput,
int pleft, int pright,
int ptop, int pbottom,
int pfront, int pback) {
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
"input tensor must fit into 32-bit index math");
int numInputDims = input.dim();
TORCH_CHECK(input.numel() && (numInputDims == 4 || numInputDims == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input);
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
if (numInputDims == 5) {
planeDim++;
dimd++;
dimh++;
dimw++;
}
int numPlanes = input.size(planeDim);
int idepth = input.size(dimd);
int iheight = input.size(dimh);
int iwidth = input.size(dimw);
int odepth = idepth + pfront + pback;
int oheight = iheight + ptop + pbottom;
int owidth = iwidth + pleft + pright;
TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1,
"input (D: ", idepth, " H: ", iheight, ", W: ", iwidth,
") is too small."
" Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth);
TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput),
"output gradient tensor must fit into 32-bit index math");
TORCH_CHECK(numPlanes == gradOutput.size(planeDim),
"gradOutput width unexpected. Expected: ", numPlanes, ", Got: ",
gradOutput.size(planeDim));
TORCH_CHECK(owidth == gradOutput.size(dimw),
"gradOutput width unexpected. Expected: ", owidth, ", Got: ",
gradOutput.size(dimw));
TORCH_CHECK(oheight == gradOutput.size(dimh),
"gradOutput height unexpected. Expected: ", oheight, ", Got: ",
gradOutput.size(dimh));
TORCH_CHECK(odepth == gradOutput.size(dimd),
"gradOutput depth unexpected. Expected: ", odepth, ", Got: ",
gradOutput.size(dimd));
}
void replication_pad3d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6");
int pleft = paddingSize[0];
int pright = paddingSize[1];
int ptop = paddingSize[2];
int pbottom = paddingSize[3];
int pfront = paddingSize[4];
int pback = paddingSize[5];
shapeCheck3d(input, pleft, pright, ptop,
pbottom, pfront, pback);
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
int numBatch = 1;
int numInputDims = input.dim();
if (numInputDims == 5) {
numBatch = input.size(0);
planeDim++;
dimd++;
dimh++;
dimw++;
}
int numPlanes = input.size(planeDim);
int inputD = input.size(dimd);
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputD = inputD + pfront + pback;
int outputH = inputH + ptop + pbottom;
int outputW = inputW + pleft + pright;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad3d_cuda", [&] {
if (numInputDims == 4) {
output.resize_({numPlanes, outputD, outputH, outputW});
auto input_ = input.unsqueeze(0);
auto output_ = output.unsqueeze(0);
auto devInput = input_.packed_accessor64<scalar_t, 5>();
auto devOutput = output_.packed_accessor64<scalar_t, 5>();
int outputPlaneSize = devOutput.size(2) * devOutput.size(3) *
devOutput.size(4);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
replication_pad_forward_kernel3d <<<gridSize, blockSize, 0,
at::cuda::getCurrentCUDAStream()>>>(
devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright);
} else {
output.resize_({numBatch, numPlanes, outputD, outputH, outputW});
auto devInput = input.packed_accessor64<scalar_t, 5>();
auto devOutput = output.packed_accessor64<scalar_t, 5>();
int outputPlaneSize = devOutput.size(2) * devOutput.size(3) *
devOutput.size(4);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.size(1),
devOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
replication_pad_forward_kernel3d <<<gridSize, blockSize, 0,
at::cuda::getCurrentCUDAStream()>>>(
devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright);
}
}
);
AT_CUDA_CHECK(cudaGetLastError());
}
void replication_pad3d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6");
int pleft = paddingSize[0];
int pright = paddingSize[1];
int ptop = paddingSize[2];
int pbottom = paddingSize[3];
int pfront = paddingSize[4];
int pback = paddingSize[5];
shapeAndGradOutputCheck3d(input, gradOutput, pleft, pright, ptop,
pbottom, pfront, pback);
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
int numInputDims = input.dim();
if (numInputDims == 5) {
planeDim++;
dimd++;
dimh++;
dimw++;
}
gradInput.resize_as_(input);
gradInput.zero_();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "replication_pad3d_backward_cuda", [&] {
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
if (numInputDims == 4) {
gradInput_ = gradInput.unsqueeze(0);
gradOutput_ = gradOutput.unsqueeze(0);
}
auto devGradInput = gradInput_.packed_accessor64<scalar_t, 5>();
auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 5>();
int outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3) *
devGradOutput.size(4);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.size(1),
devGradOutput.size(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
replication_pad_backward_kernel <<<gridSize, blockSize, 0,
at::cuda::getCurrentCUDAStream()>>>(
devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright);
}
);
AT_CUDA_CHECK(cudaGetLastError());
}
} // namespace
Tensor& replication_pad1d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad1d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor replication_pad1d_cuda(
const Tensor& input,
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad1d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor& replication_pad1d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad1d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
Tensor replication_pad1d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
replication_pad1d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
Tensor& replication_pad2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad2d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor replication_pad2d_cuda(
const Tensor& input,
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad2d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor& replication_pad2d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad2d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
Tensor replication_pad2d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
replication_pad2d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
Tensor& replication_pad3d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad3d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor replication_pad3d_cuda(
const Tensor& input,
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad3d_out_cuda_template(
output, input, paddingSize);
return output;
}
Tensor& replication_pad3d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
replication_pad3d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
Tensor replication_pad3d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
replication_pad3d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);
return gradInput;
}
} // at::native
} // at
|
f55b1d9d758d2b45a6de342678970086ec321d80.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "stdlib.h"
#include "stdio.h"
#include <math.h>
#include <hip/hip_runtime.h>
const int max_val=100;
void generateArray(float* data, int size);
__global__
void vectAddKernel(float* A, float* B, float* C, int n){
int i = threadIdx.x+blockDim.x*blockIdx.x;
if (i<n){
*(C+i)=*(A+i)+*(B+i);
}
}
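// Host wrapper: allocates device buffers, copies the inputs over, launches
// ceil(n/256) blocks of 256 threads, and copies the result back.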
void vectorAdd(float* A, float* B,float* C, int n){
int size=sizeof(float)*n;
float* d_A;
float* d_B;
float* d_C;
int _sa = hipMalloc((void**)(&d_A),size);
int _sb = hipMalloc((void**)(&d_B),size);
int _sc = hipMalloc((void**)(&d_C),size);
int _cma=hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
int _cmb=hipMemcpy(d_B,B,size,hipMemcpyHostToDevice);
dim3 grid (ceil(n/256.0), 1, 1);
dim3 block (256, 1, 1);
hipLaunchKernelGGL(( vectAddKernel), dim3(grid),dim3(block), 0, 0, d_A,d_B,d_C,n);
int _cmc=hipMemcpy(C,d_C,size,hipMemcpyDeviceToHost);
hipFree((void**)&d_A);
hipFree((void**)&d_B);
hipFree((void**)&d_C);
}
int main(int argc, char* argv[]){
if (argc != 2){
printf("Numero incorrecto de argumentos\n");
return -1;
}
int n = atoi(argv[1]);
float* arr1 = (float*)malloc(sizeof(float)*n);
float* arr2 = (float*)malloc(sizeof(float)*n);
float* res = (float*)malloc(sizeof(float)*n);
generateArray(arr1,n);
generateArray(arr2,n);
vectorAdd(arr1,arr2,res,n);
/*
printf("Array 1:");
for(int i=0;i<n;i++){
printf(" %f",*(arr1+i));
}
printf("\n");
printf("Array 2:");
for(int i=0;i<n;i++){
printf(" %f",*(arr2+i));
}
printf("\n");
printf("Res:");
for(int i=0;i<n;i++){
printf(" %f",*(res+i));
}
printf("\n");*/
}
void generateArray(float* data, int size){
for(int i=0;i<size;i++){
*(data+i)=rand() % max_val;
}
}
|
f55b1d9d758d2b45a6de342678970086ec321d80.cu
|
#include "stdlib.h"
#include "stdio.h"
#include <math.h>
#include <cuda.h>
const int max_val=100;
void generateArray(float* data, int size);
__global__
void vectAddKernel(float* A, float* B, float* C, int n){
int i = threadIdx.x+blockDim.x*blockIdx.x;
if (i<n){
*(C+i)=*(A+i)+*(B+i);
}
}
void vectorAdd(float* A, float* B,float* C, int n){
int size=sizeof(float)*n;
float* d_A;
float* d_B;
float* d_C;
int _sa = cudaMalloc((void**)(&d_A),size);
int _sb = cudaMalloc((void**)(&d_B),size);
int _sc = cudaMalloc((void**)(&d_C),size);
int _cma=cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
int _cmb=cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
dim3 grid (ceil(n/256.0), 1, 1);
dim3 block (256, 1, 1);
vectAddKernel<<<grid,block>>>(d_A,d_B,d_C,n);
int _cmc=cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
cudaFree((void**)&d_A);
cudaFree((void**)&d_B);
cudaFree((void**)&d_C);
}
int main(int argc, char* argv[]){
if (argc != 2){
printf("Numero incorrecto de argumentos\n");
return -1;
}
int n = atoi(argv[1]);
float* arr1 = (float*)malloc(sizeof(float)*n);
float* arr2 = (float*)malloc(sizeof(float)*n);
float* res = (float*)malloc(sizeof(float)*n);
generateArray(arr1,n);
generateArray(arr2,n);
vectorAdd(arr1,arr2,res,n);
/*
printf("Array 1:");
for(int i=0;i<n;i++){
printf(" %f",*(arr1+i));
}
printf("\n");
printf("Array 2:");
for(int i=0;i<n;i++){
printf(" %f",*(arr2+i));
}
printf("\n");
printf("Res:");
for(int i=0;i<n;i++){
printf(" %f",*(res+i));
}
printf("\n");*/
}
void generateArray(float* data, int size){
for(int i=0;i<size;i++){
*(data+i)=rand() % max_val;
}
}
|
6fdf66bac69535ad6f5ada6d07a7849eeda7c171.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/tsa/holtwinters.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <algorithm>
#include <cuda_utils.cuh>
#include <cuml/common/logger.hpp>
#include "time_series_datasets.h"
namespace ML {
using namespace MLCommon;
template <typename T>
struct HoltWintersInputs {
T *dataset_h;
T *test;
int n;
int h;
int batch_size;
int frequency;
ML::SeasonalType seasonal;
int start_periods;
T epsilon;
T mae_tolerance;
};
template <typename T>
class HoltWintersTest : public ::testing::TestWithParam<HoltWintersInputs<T>> {
public:
void basicTest() {
params = ::testing::TestWithParam<HoltWintersInputs<T>>::GetParam();
dataset_h = params.dataset_h;
test = params.test;
n = params.n;
h = params.h;
batch_size = params.batch_size;
frequency = params.frequency;
ML::SeasonalType seasonal = params.seasonal;
start_periods = params.start_periods;
epsilon = params.epsilon;
mae_tolerance = params.mae_tolerance;
CUDA_CHECK(hipStreamCreate(&stream));
ML::HoltWinters::buffer_size(
n, batch_size, frequency,
&leveltrend_seed_len, // = batch_size
&season_seed_len, // = frequency*batch_size
&components_len, // = (n-w_len)*batch_size
&error_len, // = batch_size
&leveltrend_coef_offset, // = (n-wlen-1)*batch_size (last row)
&season_coef_offset); // = (n-wlen-frequency)*batch_size(last freq rows)
allocate(level_ptr, components_len, stream);
allocate(trend_ptr, components_len, stream);
allocate(season_ptr, components_len, stream);
allocate(SSE_error_ptr, batch_size, stream);
allocate(forecast_ptr, batch_size * h, stream);
allocate(data, batch_size * n);
updateDevice(data, dataset_h, batch_size * n, stream);
cumlHandle handle;
handle.setStream(stream);
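// Fit level/trend/season components on the training series, then forecast
// h steps ahead for every series in the batch.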
ML::HoltWinters::fit(handle, n, batch_size, frequency, start_periods,
seasonal, epsilon, data, level_ptr, trend_ptr,
season_ptr, SSE_error_ptr);
ML::HoltWinters::forecast(handle, n, batch_size, frequency, h, seasonal,
level_ptr, trend_ptr, season_ptr, forecast_ptr);
CUDA_CHECK(hipStreamSynchronize(stream));
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(level_ptr));
CUDA_CHECK(hipFree(trend_ptr));
CUDA_CHECK(hipFree(season_ptr));
CUDA_CHECK(hipFree(SSE_error_ptr));
CUDA_CHECK(hipFree(forecast_ptr));
CUDA_CHECK(hipStreamDestroy(stream));
}
public:
hipStream_t stream;
HoltWintersInputs<T> params;
T *dataset_h, *test;
T *data;
int n, h;
int leveltrend_seed_len, season_seed_len, components_len;
int leveltrend_coef_offset, season_coef_offset;
int error_len;
int batch_size, frequency, start_periods;
T *SSE_error_ptr, *level_ptr, *trend_ptr, *season_ptr, *forecast_ptr;
T epsilon, mae_tolerance;
};
const std::vector<HoltWintersInputs<float>> inputsf = {
{additive_trainf.data(), additive_testf.data(), 90, 10, 1, 25,
ML::SeasonalType::ADDITIVE, 2, 2.24e-3, 1e-6},
{multiplicative_trainf.data(), multiplicative_testf.data(), 132, 12, 1, 12,
ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-3, 3e-2},
{additive_normalized_trainf.data(), additive_normalized_testf.data(), 90, 10,
1, 25, ML::SeasonalType::ADDITIVE, 2, 2.24e-3, 1e-6},
{multiplicative_normalized_trainf.data(),
multiplicative_normalized_testf.data(), 132, 12, 1, 12,
ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-3, 2.5e-1}};
const std::vector<HoltWintersInputs<double>> inputsd = {
{additive_traind.data(), additive_testd.data(), 90, 10, 1, 25,
ML::SeasonalType::ADDITIVE, 2, 2.24e-7, 1e-6},
{multiplicative_traind.data(), multiplicative_testd.data(), 132, 12, 1, 12,
ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-7, 3e-2},
{additive_normalized_traind.data(), additive_normalized_testd.data(), 90, 10,
1, 25, ML::SeasonalType::ADDITIVE, 2, 2.24e-7, 1e-6},
{multiplicative_normalized_traind.data(),
multiplicative_normalized_testd.data(), 132, 12, 1, 12,
ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-7, 5e-2}};
template <typename T>
void normalise(T *data, int len) {
T min = *std::min_element(data, data + len);
T max = *std::max_element(data, data + len);
for (int i = 0; i < len; i++) {
data[i] = (data[i] - min) / (max - min);
}
}
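// Despite the name, this returns the middle element of the sorted absolute
// errors after normalising both series to [0, 1] (a median when batch_size is 1).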
template <typename T>
T calculate_MAE(T *test, T *forecast, int batch_size, int h) {
normalise(test, batch_size * h);
normalise(forecast, batch_size * h);
std::vector<T> ae(batch_size * h);
for (int i = 0; i < batch_size * h; i++) {
ae[i] = abs(test[i] - forecast[i]);
}
std::sort(ae.begin(), ae.end());
T mae;
if (h % 2 == 0) {
mae = (ae[h / 2 - 1] + ae[h / 2]) / 2;
} else {
mae = ae[(int)h / 2];
}
return mae;
}
typedef HoltWintersTest<float> HoltWintersTestF;
TEST_P(HoltWintersTestF, Fit) {
std::vector<float> forecast_h(batch_size * h);
updateHost(forecast_h.data(), forecast_ptr, batch_size * h, stream);
myPrintHostVector("forecast", forecast_h.data(), batch_size * h);
float mae = calculate_MAE<float>(test, forecast_h.data(), batch_size, h);
CUML_LOG_DEBUG("MAE: %f", mae);
ASSERT_TRUE(mae < mae_tolerance);
}
typedef HoltWintersTest<double> HoltWintersTestD;
TEST_P(HoltWintersTestD, Fit) {
std::vector<double> forecast_h(batch_size * h);
updateHost(forecast_h.data(), forecast_ptr, batch_size * h, stream);
myPrintHostVector("forecast", forecast_h.data(), batch_size * h);
double mae = calculate_MAE<double>(test, forecast_h.data(), batch_size, h);
CUML_LOG_DEBUG("MAE: %f", mae);
ASSERT_TRUE(mae < mae_tolerance);
}
INSTANTIATE_TEST_CASE_P(HoltWintersTests, HoltWintersTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(HoltWintersTests, HoltWintersTestD,
::testing::ValuesIn(inputsd));
} // namespace ML
|
6fdf66bac69535ad6f5ada6d07a7849eeda7c171.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/tsa/holtwinters.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <algorithm>
#include <cuda_utils.cuh>
#include <cuml/common/logger.hpp>
#include "time_series_datasets.h"
namespace ML {
using namespace MLCommon;
template <typename T>
struct HoltWintersInputs {
T *dataset_h;
T *test;
int n;
int h;
int batch_size;
int frequency;
ML::SeasonalType seasonal;
int start_periods;
T epsilon;
T mae_tolerance;
};
template <typename T>
class HoltWintersTest : public ::testing::TestWithParam<HoltWintersInputs<T>> {
public:
void basicTest() {
params = ::testing::TestWithParam<HoltWintersInputs<T>>::GetParam();
dataset_h = params.dataset_h;
test = params.test;
n = params.n;
h = params.h;
batch_size = params.batch_size;
frequency = params.frequency;
ML::SeasonalType seasonal = params.seasonal;
start_periods = params.start_periods;
epsilon = params.epsilon;
mae_tolerance = params.mae_tolerance;
CUDA_CHECK(cudaStreamCreate(&stream));
ML::HoltWinters::buffer_size(
n, batch_size, frequency,
&leveltrend_seed_len, // = batch_size
&season_seed_len, // = frequency*batch_size
&components_len, // = (n-w_len)*batch_size
&error_len, // = batch_size
&leveltrend_coef_offset, // = (n-wlen-1)*batch_size (last row)
&season_coef_offset); // = (n-wlen-frequency)*batch_size(last freq rows)
allocate(level_ptr, components_len, stream);
allocate(trend_ptr, components_len, stream);
allocate(season_ptr, components_len, stream);
allocate(SSE_error_ptr, batch_size, stream);
allocate(forecast_ptr, batch_size * h, stream);
allocate(data, batch_size * n);
updateDevice(data, dataset_h, batch_size * n, stream);
cumlHandle handle;
handle.setStream(stream);
ML::HoltWinters::fit(handle, n, batch_size, frequency, start_periods,
seasonal, epsilon, data, level_ptr, trend_ptr,
season_ptr, SSE_error_ptr);
ML::HoltWinters::forecast(handle, n, batch_size, frequency, h, seasonal,
level_ptr, trend_ptr, season_ptr, forecast_ptr);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(level_ptr));
CUDA_CHECK(cudaFree(trend_ptr));
CUDA_CHECK(cudaFree(season_ptr));
CUDA_CHECK(cudaFree(SSE_error_ptr));
CUDA_CHECK(cudaFree(forecast_ptr));
CUDA_CHECK(cudaStreamDestroy(stream));
}
public:
cudaStream_t stream;
HoltWintersInputs<T> params;
T *dataset_h, *test;
T *data;
int n, h;
int leveltrend_seed_len, season_seed_len, components_len;
int leveltrend_coef_offset, season_coef_offset;
int error_len;
int batch_size, frequency, start_periods;
T *SSE_error_ptr, *level_ptr, *trend_ptr, *season_ptr, *forecast_ptr;
T epsilon, mae_tolerance;
};
const std::vector<HoltWintersInputs<float>> inputsf = {
{additive_trainf.data(), additive_testf.data(), 90, 10, 1, 25,
ML::SeasonalType::ADDITIVE, 2, 2.24e-3, 1e-6},
{multiplicative_trainf.data(), multiplicative_testf.data(), 132, 12, 1, 12,
ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-3, 3e-2},
{additive_normalized_trainf.data(), additive_normalized_testf.data(), 90, 10,
1, 25, ML::SeasonalType::ADDITIVE, 2, 2.24e-3, 1e-6},
{multiplicative_normalized_trainf.data(),
multiplicative_normalized_testf.data(), 132, 12, 1, 12,
ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-3, 2.5e-1}};
const std::vector<HoltWintersInputs<double>> inputsd = {
{additive_traind.data(), additive_testd.data(), 90, 10, 1, 25,
ML::SeasonalType::ADDITIVE, 2, 2.24e-7, 1e-6},
{multiplicative_traind.data(), multiplicative_testd.data(), 132, 12, 1, 12,
ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-7, 3e-2},
{additive_normalized_traind.data(), additive_normalized_testd.data(), 90, 10,
1, 25, ML::SeasonalType::ADDITIVE, 2, 2.24e-7, 1e-6},
{multiplicative_normalized_traind.data(),
multiplicative_normalized_testd.data(), 132, 12, 1, 12,
ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-7, 5e-2}};
template <typename T>
void normalise(T *data, int len) {
T min = *std::min_element(data, data + len);
T max = *std::max_element(data, data + len);
for (int i = 0; i < len; i++) {
data[i] = (data[i] - min) / (max - min);
}
}
template <typename T>
T calculate_MAE(T *test, T *forecast, int batch_size, int h) {
normalise(test, batch_size * h);
normalise(forecast, batch_size * h);
std::vector<T> ae(batch_size * h);
for (int i = 0; i < batch_size * h; i++) {
ae[i] = abs(test[i] - forecast[i]);
}
std::sort(ae.begin(), ae.end());
T mae;
if (h % 2 == 0) {
mae = (ae[h / 2 - 1] + ae[h / 2]) / 2;
} else {
mae = ae[(int)h / 2];
}
return mae;
}
typedef HoltWintersTest<float> HoltWintersTestF;
TEST_P(HoltWintersTestF, Fit) {
std::vector<float> forecast_h(batch_size * h);
updateHost(forecast_h.data(), forecast_ptr, batch_size * h, stream);
myPrintHostVector("forecast", forecast_h.data(), batch_size * h);
float mae = calculate_MAE<float>(test, forecast_h.data(), batch_size, h);
CUML_LOG_DEBUG("MAE: %f", mae);
ASSERT_TRUE(mae < mae_tolerance);
}
typedef HoltWintersTest<double> HoltWintersTestD;
TEST_P(HoltWintersTestD, Fit) {
std::vector<double> forecast_h(batch_size * h);
updateHost(forecast_h.data(), forecast_ptr, batch_size * h, stream);
myPrintHostVector("forecast", forecast_h.data(), batch_size * h);
double mae = calculate_MAE<double>(test, forecast_h.data(), batch_size, h);
CUML_LOG_DEBUG("MAE: %f", mae);
ASSERT_TRUE(mae < mae_tolerance);
}
INSTANTIATE_TEST_CASE_P(HoltWintersTests, HoltWintersTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(HoltWintersTests, HoltWintersTestD,
::testing::ValuesIn(inputsd));
} // namespace ML
|
e1c4a68ec32943f51430125c4887486ccf379633.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "DataFormats/Common/interface/DataFrame.h"
#include "DataFormats/DetId/interface/DetId.h"
#include "DataFormats/HcalDetId/interface/HcalDetId.h"
#include "DataFormats/HcalDigi/interface/HBHEDataFrame.h"
#include "DataFormats/HcalDigi/interface/HcalDigiCollections.h"
#include "DataFormats/HcalDigi/interface/QIE10DataFrame.h"
#include "DataFormats/HcalDigi/interface/QIE11DataFrame.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
__global__ void kernel_test_hcal_qiesample(HcalQIESample *sample, uint16_t value) {
printf("kernel: testing hcal qie sampel\n");
printf("%f %f %f\n", nominal_adc2fc[0], nominal_adc2fc[1], nominal_adc2fc[2]);
HcalQIESample tmp{value};
*sample = tmp;
}
__global__ void kernel_test_hcal_qie8_hbhedf(HBHEDataFrame *df) {
printf("kernel: testing hcal hbhe dataframe\n");
df->setSize(10);
for (auto i = 0; i < 10; i++)
df->setSample(i, HcalQIESample(100));
df->setReadoutIds(HcalElectronicsId(100));
}
void test_hcal_qiesample() {
HcalQIESample h_sample, h_test_sample0{100}, h_test_sample1;
HcalQIESample *d_sample;
hipMalloc((void **)&d_sample, sizeof(HcalQIESample));
hipMemcpy(d_sample, &h_sample, sizeof(HcalQIESample), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_test_hcal_qiesample), dim3(1), dim3(1), 0, 0, d_sample, 100);
hipMemcpy(&h_sample, d_sample, sizeof(HcalQIESample), hipMemcpyDeviceToHost);
assert(h_sample() == h_test_sample0());
assert(h_sample() != h_test_sample1());
}
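// One thread per data frame: sum the raw 16-bit sample words so the device
// result can be compared with the host-side sum.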
template <typename TDF>
__global__ void kernel_test_hcal_qie8_digis(TDF *pdfs, uint32_t *out) {
int id = threadIdx.x;
uint32_t sum = 0;
for (auto i = 0; i < 10; i++)
sum += pdfs[id].sample(i).raw();
out[id] = sum;
}
template <typename TDF>
__global__ void kernel_test_hcal_qie1011_digis(uint16_t *pdfs, uint32_t *out, int samples) {
printf("kernel: testing hcal qie1011 df\n");
int id = threadIdx.x;
uint32_t sum = 0;
int nwords = TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS + TDF::FLAG_WORDS;
TDF df(edm::DataFrame(0, pdfs + id * nwords, nwords));
for (auto i = 0; i < df.samples(); i++) {
sum += df[i].adc();
}
out[id] = sum;
}
template <typename TDF>
void test_hcal_qie1011_digis() {
constexpr int size = 10;
constexpr int samples = 10;
constexpr int detid = 2;
HcalDataFrameContainer<TDF> coll{samples, detid};
uint16_t *d_data;
uint32_t *d_out;
uint32_t h_out[size], h_test_out[size];
for (auto i = 0; i < size; i++) {
// #words per single TDF
uint16_t tmp[TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS + TDF::FLAG_WORDS];
h_test_out[i] = 0;
for (auto j = TDF::HEADER_WORDS; j < TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS; j++) {
tmp[j] = 100;
}
TDF df(edm::DataFrame(0, tmp, TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS + TDF::FLAG_WORDS));
for (auto j = 0; j < df.samples(); j++)
h_test_out[i] += df[j].adc();
coll.addDataFrame(DetId{(uint32_t)i}, (uint16_t *)&tmp);
}
hipMalloc((void **)&d_data,
size * (TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS + TDF::FLAG_WORDS) * sizeof(uint16_t));
hipMalloc((void **)&d_out, size * sizeof(uint32_t));
hipMemcpy(d_data,
coll.frame(0),
size * (TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS + TDF::FLAG_WORDS) * sizeof(uint16_t),
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_test_hcal_qie1011_digis<TDF>), dim3(1), dim3(size), 0, 0, d_data, d_out, samples);
hipDeviceSynchronize();
auto code = hipGetLastError();
if (code != hipSuccess)
std::cout << hipGetErrorString(code);
hipMemcpy(&h_out, d_out, size * sizeof(uint32_t), hipMemcpyDeviceToHost);
// comparison
for (auto i = 0; i < size; i++) {
std::cout << h_out[i] << " == " << h_test_out[i] << std::endl;
assert(h_out[i] == h_test_out[i]);
}
}
template <typename TDF>
void test_hcal_qie8_digis() {
constexpr int n = 10;
edm::SortedCollection<TDF> coll{n};
TDF *d_dfs;
uint32_t *d_out;
uint32_t h_out[n], h_test_out[n];
for (auto i = 0; i < n; i++) {
TDF &df = coll[i];
df.setSize(10);
h_test_out[i] = 0;
uint32_t test = 0;
for (auto j = 0; j < 10; j++) {
df.setSample(j, HcalQIESample(100));
h_test_out[i] += df.sample(j).raw();
test += df.sample(j).raw();
}
}
hipMalloc((void **)&d_dfs, n * sizeof(TDF));
hipMalloc((void **)&d_out, n * sizeof(uint32_t));
hipMemcpy(d_dfs, &(*coll.begin()), n * sizeof(TDF), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_test_hcal_qie8_digis), dim3(1), dim3(n), 0, 0, d_dfs, d_out);
hipMemcpy(&h_out, d_out, n * sizeof(uint32_t), hipMemcpyDeviceToHost);
std::cout << "collection size = " << coll.size() << std::endl;
// comparison
for (auto i = 0; i < n; i++) {
std::cout << h_out[i] << " == " << h_test_out[i] << std::endl;
assert(h_out[i] == h_test_out[i]);
}
}
void test_hcal_qie8_hbhedf() {
HBHEDataFrame h_df, h_test_df;
HBHEDataFrame *d_df;
h_test_df.setSize(10);
for (auto i = 0; i < 10; i++)
h_test_df.setSample(i, HcalQIESample(100));
h_test_df.setReadoutIds(HcalElectronicsId(100));
hipMalloc((void **)&d_df, sizeof(HBHEDataFrame));
hipMemcpy(d_df, &h_df, sizeof(HBHEDataFrame), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_test_hcal_qie8_hbhedf), dim3(1), dim3(1), 0, 0, d_df);
hipMemcpy(&h_df, d_df, sizeof(HBHEDataFrame), hipMemcpyDeviceToHost);
assert(h_df.size() == h_test_df.size());
assert(h_df.elecId() == h_test_df.elecId());
for (auto i = 0; i < 10; i++)
assert(h_df[i].raw() == h_test_df[i].raw());
}
int main(int argc, char **argv) {
cms::cudatest::requireDevices();
// qie8
test_hcal_qiesample();
test_hcal_qie8_hbhedf();
test_hcal_qie8_digis<HBHEDataFrame>();
test_hcal_qie8_digis<HFDataFrame>();
test_hcal_qie8_digis<HODataFrame>();
// qie1011
test_hcal_qie1011_digis<QIE10DataFrame>();
test_hcal_qie1011_digis<QIE11DataFrame>();
return 0;
}
|
e1c4a68ec32943f51430125c4887486ccf379633.cu
|
#include <cassert>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include "DataFormats/Common/interface/DataFrame.h"
#include "DataFormats/DetId/interface/DetId.h"
#include "DataFormats/HcalDetId/interface/HcalDetId.h"
#include "DataFormats/HcalDigi/interface/HBHEDataFrame.h"
#include "DataFormats/HcalDigi/interface/HcalDigiCollections.h"
#include "DataFormats/HcalDigi/interface/QIE10DataFrame.h"
#include "DataFormats/HcalDigi/interface/QIE11DataFrame.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
__global__ void kernel_test_hcal_qiesample(HcalQIESample *sample, uint16_t value) {
printf("kernel: testing hcal qie sampel\n");
printf("%f %f %f\n", nominal_adc2fc[0], nominal_adc2fc[1], nominal_adc2fc[2]);
HcalQIESample tmp{value};
*sample = tmp;
}
__global__ void kernel_test_hcal_qie8_hbhedf(HBHEDataFrame *df) {
printf("kernel: testing hcal hbhe dataframe\n");
df->setSize(10);
for (auto i = 0; i < 10; i++)
df->setSample(i, HcalQIESample(100));
df->setReadoutIds(HcalElectronicsId(100));
}
void test_hcal_qiesample() {
HcalQIESample h_sample, h_test_sample0{100}, h_test_sample1;
HcalQIESample *d_sample;
cudaMalloc((void **)&d_sample, sizeof(HcalQIESample));
cudaMemcpy(d_sample, &h_sample, sizeof(HcalQIESample), cudaMemcpyHostToDevice);
kernel_test_hcal_qiesample<<<1, 1>>>(d_sample, 100);
cudaMemcpy(&h_sample, d_sample, sizeof(HcalQIESample), cudaMemcpyDeviceToHost);
assert(h_sample() == h_test_sample0());
assert(h_sample() != h_test_sample1());
}
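// One thread per data frame: sum the raw 16-bit sample words so the device
// result can be compared with the host-side sum.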
template <typename TDF>
__global__ void kernel_test_hcal_qie8_digis(TDF *pdfs, uint32_t *out) {
int id = threadIdx.x;
uint32_t sum = 0;
for (auto i = 0; i < 10; i++)
sum += pdfs[id].sample(i).raw();
out[id] = sum;
}
template <typename TDF>
__global__ void kernel_test_hcal_qie1011_digis(uint16_t *pdfs, uint32_t *out, int samples) {
printf("kernel: testing hcal qie1011 df\n");
int id = threadIdx.x;
uint32_t sum = 0;
int nwords = TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS + TDF::FLAG_WORDS;
TDF df(edm::DataFrame(0, pdfs + id * nwords, nwords));
for (auto i = 0; i < df.samples(); i++) {
sum += df[i].adc();
}
out[id] = sum;
}
template <typename TDF>
void test_hcal_qie1011_digis() {
constexpr int size = 10;
constexpr int samples = 10;
constexpr int detid = 2;
HcalDataFrameContainer<TDF> coll{samples, detid};
uint16_t *d_data;
uint32_t *d_out;
uint32_t h_out[size], h_test_out[size];
for (auto i = 0; i < size; i++) {
// #words per single TDF
uint16_t tmp[TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS + TDF::FLAG_WORDS];
h_test_out[i] = 0;
for (auto j = TDF::HEADER_WORDS; j < TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS; j++) {
tmp[j] = 100;
}
TDF df(edm::DataFrame(0, tmp, TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS + TDF::FLAG_WORDS));
for (auto j = 0; j < df.samples(); j++)
h_test_out[i] += df[j].adc();
coll.addDataFrame(DetId{(uint32_t)i}, (uint16_t *)&tmp);
}
cudaMalloc((void **)&d_data,
size * (TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS + TDF::FLAG_WORDS) * sizeof(uint16_t));
cudaMalloc((void **)&d_out, size * sizeof(uint32_t));
cudaMemcpy(d_data,
coll.frame(0),
size * (TDF::WORDS_PER_SAMPLE * samples + TDF::HEADER_WORDS + TDF::FLAG_WORDS) * sizeof(uint16_t),
cudaMemcpyHostToDevice);
kernel_test_hcal_qie1011_digis<TDF><<<1, size>>>(d_data, d_out, samples);
cudaDeviceSynchronize();
auto code = cudaGetLastError();
if (code != cudaSuccess)
std::cout << cudaGetErrorString(code);
cudaMemcpy(&h_out, d_out, size * sizeof(uint32_t), cudaMemcpyDeviceToHost);
// comparison
for (auto i = 0; i < size; i++) {
std::cout << h_out[i] << " == " << h_test_out[i] << std::endl;
assert(h_out[i] == h_test_out[i]);
}
}
template <typename TDF>
void test_hcal_qie8_digis() {
constexpr int n = 10;
edm::SortedCollection<TDF> coll{n};
TDF *d_dfs;
uint32_t *d_out;
uint32_t h_out[n], h_test_out[n];
for (auto i = 0; i < n; i++) {
TDF &df = coll[i];
df.setSize(10);
h_test_out[i] = 0;
uint32_t test = 0;
for (auto j = 0; j < 10; j++) {
df.setSample(j, HcalQIESample(100));
h_test_out[i] += df.sample(j).raw();
test += df.sample(j).raw();
}
}
cudaMalloc((void **)&d_dfs, n * sizeof(TDF));
cudaMalloc((void **)&d_out, n * sizeof(uint32_t));
cudaMemcpy(d_dfs, &(*coll.begin()), n * sizeof(TDF), cudaMemcpyHostToDevice);
kernel_test_hcal_qie8_digis<<<1, n>>>(d_dfs, d_out);
cudaMemcpy(&h_out, d_out, n * sizeof(uint32_t), cudaMemcpyDeviceToHost);
std::cout << "collection size = " << coll.size() << std::endl;
// comparison
for (auto i = 0; i < n; i++) {
std::cout << h_out[i] << " == " << h_test_out[i] << std::endl;
assert(h_out[i] == h_test_out[i]);
}
}
void test_hcal_qie8_hbhedf() {
HBHEDataFrame h_df, h_test_df;
HBHEDataFrame *d_df;
h_test_df.setSize(10);
for (auto i = 0; i < 10; i++)
h_test_df.setSample(i, HcalQIESample(100));
h_test_df.setReadoutIds(HcalElectronicsId(100));
cudaMalloc((void **)&d_df, sizeof(HBHEDataFrame));
cudaMemcpy(d_df, &h_df, sizeof(HBHEDataFrame), cudaMemcpyHostToDevice);
kernel_test_hcal_qie8_hbhedf<<<1, 1>>>(d_df);
cudaMemcpy(&h_df, d_df, sizeof(HBHEDataFrame), cudaMemcpyDeviceToHost);
assert(h_df.size() == h_test_df.size());
assert(h_df.elecId() == h_test_df.elecId());
for (auto i = 0; i < 10; i++)
assert(h_df[i].raw() == h_test_df[i].raw());
}
int main(int argc, char **argv) {
cms::cudatest::requireDevices();
// qie8
test_hcal_qiesample();
test_hcal_qie8_hbhedf();
test_hcal_qie8_digis<HBHEDataFrame>();
test_hcal_qie8_digis<HFDataFrame>();
test_hcal_qie8_digis<HODataFrame>();
// qie1011
test_hcal_qie1011_digis<QIE10DataFrame>();
test_hcal_qie1011_digis<QIE11DataFrame>();
return 0;
}
|
bc32a0360e1bbe9cfdffe0544d30da14a27a8812.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include <limits>
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
struct NUM_ADD
{
short2 read_reference_number;
int address_array;
};
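// Kernel 1: one thread block per read/reference pair. Threads sweep the DP matrix
// in anti-diagonal (wavefront) order with affine-gap scoring, store a per-cell
// backtracking entry in `direction`, and record the best end cell in `result`
// for the subsequent CIGAR traceback.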
__global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result
{
int offset=blockIdx.x;
__shared__ short2 read_reference_number;
__shared__ char * read_base_array;
__shared__ char4 * reference_base_array;
__shared__ int mismatch;
__shared__ int match;
__shared__ int open;
__shared__ int extend;
__shared__ short2 * direction_index;
while(offset<size)
{
if( threadIdx.x==0)
{
read_reference_number=num_add[offset].read_reference_number;
read_base_array=(char *) (data+num_add[offset].address_array);
reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128);
direction_index=(short2 *) (direction+offset*640*1100);
}
__syncthreads();
__shared__ char reference_base_in_char[600];
int hh=(read_reference_number.y+4-1)/4;
int tt=(hh+blockDim.x-1)/blockDim.x;
for(int ii=0;ii<tt;ii++)
{
int aa=threadIdx.x+ii*blockDim.x;
if(aa< hh)
{
char4 reference_base_in_thread;
reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory
reference_base_in_char[aa*4]=reference_base_in_thread.x;
reference_base_in_char[aa*4+1]=reference_base_in_thread.y;
reference_base_in_char[aa*4+2]=reference_base_in_thread.z;
reference_base_in_char[aa*4+3]=reference_base_in_thread.w;
}
}
__shared__ int MM[322];
__shared__ int gap_h[322]; //insertion
__shared__ short2 gap_size_h[322]; //insertion
__shared__ int result_col;
__shared__ int result_row;
__shared__ int result_col_index;
__shared__ int result_row_index;
//__shared__ char cigar_m[128];
//__shared__ int cigar_int_m[128];
//int final_result;
//int final_i;
//int final_j;
if(threadIdx.x==0)
{
MM[0]=0;
gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2;
gap_size_h[0].x=0;
gap_size_h[0].y=0;
match=200;
mismatch=-150;
open=-260;
extend=-11;
result_col=-1000000000;//std::numeric_limits<int>::min()/2;
result_row=-1000000000;//std::numeric_limits<int>::min()/2;
// for(int i=0;i<read_reference_number.y;i++)
// printf("%c",reference_base_in_char[i]);
// printf("\n");
// for(int i=0;i<read_reference_number.x;i++)
// printf("%c",read_base_array[i]);
}
__syncthreads();
int read_number=read_reference_number.x;
{
char read_base;
read_base=read_base_array[threadIdx.x];
int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;;
int gap_size_v=0; //Deletion
int M=0; //now
int step_right; //now
   int ki=0; //insertion h negative
//deletion v
int MMM=0;
short mt=0;
short2 curmt;
curmt.x=0;
curmt.y=0;
int current_reference_id=0;
for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++)
{
int aa=j-threadIdx.x;
if( aa>=0 && (current_reference_id<read_reference_number.y))
{
     int prev_gap=M+open; //M which is calculated by the last step in the same thread
gap_v+=extend;
if(prev_gap>gap_v)
{
gap_v=prev_gap;
gap_size_v=1;
}
else
gap_size_v++;
char reference_base_each=reference_base_in_char[current_reference_id];
M=MMM+(read_base==reference_base_each? match:mismatch);
prev_gap=MM[threadIdx.x]+open;
step_right=gap_h[threadIdx.x]+extend;
if(prev_gap>step_right)
{
step_right=prev_gap;
ki=1;
}
else
ki=gap_size_h[threadIdx.x].x+1;
bool diag=(M>=gap_v)&&(M>=step_right);
curmt.y=0;
if(diag)
{
curmt.x=0;
//if(threadIdx.x==0||current_reference_id==0)
// curmt.y=0;
// else
curmt.y=mt+1;
// curBtrack=0;
}
else
if(step_right>=gap_v)
{
M=step_right;
curmt.x=0-ki;
// curBtrack=0-ki;
}
else
{
M=gap_v;
curmt.x=gap_size_v;
//curBtrack=gap_size_v;
}
MMM=MM[threadIdx.x];
mt=gap_size_h[threadIdx.x].y;
direction_index[640*j+threadIdx.x]=curmt;
//if(threadIdx.x==read_reference_number.x-3)
//printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack);
if(current_reference_id==read_reference_number.y-1)
{
if(M>=result_row)
{
result_row=M;
result_row_index=threadIdx.x; //
}
//printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x);
}
if(threadIdx.x==read_reference_number.x-1)
{
if(M>=result_col)
{
result_col=M;
result_col_index=current_reference_id; // +1
}
}
current_reference_id++;
}
    __syncthreads(); //to make sure that the former values of MM[threadIdx.x+1] are used by the other threads.
MM[threadIdx.x+1]=M;
gap_h[threadIdx.x+1]=step_right;
gap_size_h[threadIdx.x+1].x=ki;
gap_size_h[threadIdx.x+1].y=curmt.y;
    __syncthreads(); // there should be two __syncthreads(): to make sure that every MM[threadIdx.x+1] has been updated before M, D and I change.
}
}
   // char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
// __shared__ int cigar_index;
// int segment_length;
// short2 btr;
// char new_state;
// int step_length;
int4 result4;
if(threadIdx.x==read_reference_number.x-1)
{
//printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index);
    if(result_row>result_col || (result_row==result_col && (read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1)))
{
// final_result=result_row;
result4.x=read_reference_number.y-1;
result4.y=result_row_index;
result4.z=read_reference_number.x-1-result_row_index;
}
else
{
// final_result=result_col;
result4.x=result_col_index;
result4.y=read_reference_number.x-1;
result4.z=0;
}
//result[offset*3]=final_result;
//printf("%d\n",final_result);
//result4.x=fina_i;
//result4.y=fina_j;
//result4.z=segment_length;
result[offset]=result4;
}
__syncthreads();
offset+=gridDim.x;
}
}
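// Kernel 2: thread 0 of each block walks the backtracking entries from the end
// cell recorded in `result`, emitting CIGAR operations (S/M/I/D) in reverse;
// the block then writes them to `cigar`/`cigar_int` in forward order.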
__global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result
{
int offset=blockIdx.x;
 int4 result4;
short2 * direction_index;
__shared__ char * cigar_store;
__shared__ int *cigar_int_store;
__shared__ char cigar_m[128];
__shared__ int cigar_int_m[128];
while(offset<size)
{
  char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
__shared__ int cigar_index;
int segment_length;
short2 btr;
char new_state;
int step_length;
if( threadIdx.x==0)
{
result4=result[offset];
direction_index=(short2 *) (direction+offset*640*1100);
cigar_store=(char *) (cigar+offset*sizeof(char)*128);
cigar_int_store=(int *) (cigar_int+offset*128);
//printf("\n %d %d\n", final_i,final_j);
cigar_index=0;
if(result4.z>0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.z;
cigar_index++;
}
segment_length=0;
state='N';
do
{
btr=direction_index[(result4.x+result4.y)*640+result4.y];
if(btr.x>0)
{
new_state='D';
step_length=btr.x;
result4.x-=step_length;
}
else
if(btr.x<0)
{
new_state='I';
step_length=0-btr.x;
result4.y-=step_length;
}
else
{
new_state='M';
step_length=btr.y;
result4.x-=step_length;
result4.y-=step_length;
}
if(state=='N') state=new_state;
if(state==new_state)
{
segment_length+=step_length;
}
else
{
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
segment_length=step_length;
cigar_index++;
state=new_state;
}
}while(result4.x>=0&&result4.y>=0);
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
cigar_index++;
if(result4.y>=0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.y+1;
cigar_index++;
}
result4.z=result4.x+1;
result4.w=cigar_index;
result[offset]=result4;
/* for(int i=cigar_index-1;i>=0;i--)
{
printf("%d%c",cigar_int_m[i],cigar_m[i]);
}
*/
}
__syncthreads();
if(threadIdx.x<cigar_index && cigar_index<=blockDim.x)
{
// if(threadIdx.x==0)
// printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]);
cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x];
cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x];
// if(threadIdx.x==0)
// printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]);
}
offset+=gridDim.x;
}
}
struct InputData
{
char read_base[600];
char reference_base[600];
};
int main(int argc, char* args[])
{
int total_size=0;
FILE * file;
file=fopen(args[1],"r");
int size;
double computation_time=0;//total_time=0;
timespec start,finish;
/* char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[1]);
strcpy(inputdata[index].read_base,data[1]);
index++;
}
*/
/* fscanf(file,"%d",&size);
while(!feof(file))
{
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<size;i++)
{
fscanf(file,"%s ",inputdata[i].reference_base);
fscanf(file,"%s ",inputdata[i].read_base);
}
*/
char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[i]);
strcpy(inputdata[index].read_base,data[j]);
index++;
}
//data preparation.
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128);
NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total);
 char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // thus we do not need to worry about alignment
int data_size=0;
char * data_d_total;
hipMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4);
int * result_h=(int *) malloc(sizeof(int)*size*4);
char * cigar_h=(char *) malloc(sizeof(char)*size*128);
int * cigar_int_h=(int *) malloc(sizeof(int)*size*128);
for(int i=0;i<size;i++)
{
char4 reference_tep[150];
int read_len=strlen(inputdata[i].read_base);
int ref_len=strlen(inputdata[i].reference_base);
int new_len=(ref_len+4-1)/4;
total_size+=ref_len*read_len;
for(int j=0;j<new_len;j++)
{
reference_tep[j].x=inputdata[i].reference_base[j*4];
if(j*4+1<ref_len)
reference_tep[j].y=inputdata[i].reference_base[j*4+1];
if(j*4+2<ref_len)
reference_tep[j].z=inputdata[i].reference_base[j*4+2];
if(j*4+3<ref_len)
reference_tep[j].w=inputdata[i].reference_base[j*4+3];
}
data_num_add[i].read_reference_number.x=read_len;
data_num_add[i].read_reference_number.y=ref_len;
data_num_add[i].address_array=data_size;
memcpy(data_h,inputdata[i].read_base,read_len);
data_h+=(read_len+128-1)/128*128;
data_size+=(read_len+128-1)/128*128;
memcpy(data_h,reference_tep,sizeof(char4)* new_len);
data_h+=(new_len*sizeof(char4)+127)/128*128;
data_size+=(new_len*sizeof(char4)+127)/128*128;
}
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
hipMemcpy(data_d_total,data_h_total,data_size_to_copy,hipMemcpyHostToDevice);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total);
char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128;
int4 * result_d=(int4 *) (data_d_total+data_size_to_copy);
char * cigar;
hipMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int)));
int * cigar_int=(int *) (cigar+size*128*sizeof(char));
int * direction;
hipMalloc( (int **) & direction, size * (640*1100* sizeof (int)));
dim3 block(320);
dim3 grid(size);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
hipLaunchKernelGGL(( calculate_cigar), dim3(grid),dim3(block), 0, 0, size,data_d,num_add_d,result_d,direction); //result
// calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result
hipMemcpy(result_h,result_d,size*sizeof(int)*4,hipMemcpyDeviceToHost);
hipMemcpy(cigar_h,cigar,128*sizeof(char)*size, hipMemcpyDeviceToHost);
hipMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
/* for(int i=0;i<size;i++)
{
printf("%d\n",result_h[i*4+1]);
printf("[");
for(int j=0;j<result_h[i*4+3];j++)
{
if(j!=0) printf(", ");
printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]);
}
printf("]\n");
}
*/
hipFree(direction);
free(data_h_total);
hipFree(data_d_total);
free(inputdata);
hipFree(cigar);
free(cigar_int_h);
free(cigar_h);
// fscanf(file,"%d",&size);
}
// printf(" computation_time= %e total_time=%e \n",computation_time,0);
printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000);
return 0;
}
|
bc32a0360e1bbe9cfdffe0544d30da14a27a8812.cu
|
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include <limits>
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
struct NUM_ADD
{
short2 read_reference_number;
int address_array;
};
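// Kernel 1: one thread block per read/reference pair. Threads sweep the DP matrix
// in anti-diagonal (wavefront) order with affine-gap scoring, store a per-cell
// backtracking entry in `direction`, and record the best end cell in `result`
// for the subsequent CIGAR traceback.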
__global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result
{
int offset=blockIdx.x;
__shared__ short2 read_reference_number;
__shared__ char * read_base_array;
__shared__ char4 * reference_base_array;
__shared__ int mismatch;
__shared__ int match;
__shared__ int open;
__shared__ int extend;
__shared__ short2 * direction_index;
while(offset<size)
{
if( threadIdx.x==0)
{
read_reference_number=num_add[offset].read_reference_number;
read_base_array=(char *) (data+num_add[offset].address_array);
reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128);
direction_index=(short2 *) (direction+offset*640*1100);
}
__syncthreads();
__shared__ char reference_base_in_char[600];
int hh=(read_reference_number.y+4-1)/4;
int tt=(hh+blockDim.x-1)/blockDim.x;
for(int ii=0;ii<tt;ii++)
{
int aa=threadIdx.x+ii*blockDim.x;
if(aa< hh)
{
char4 reference_base_in_thread;
reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory
reference_base_in_char[aa*4]=reference_base_in_thread.x;
reference_base_in_char[aa*4+1]=reference_base_in_thread.y;
reference_base_in_char[aa*4+2]=reference_base_in_thread.z;
reference_base_in_char[aa*4+3]=reference_base_in_thread.w;
}
}
__shared__ int MM[322];
__shared__ int gap_h[322]; //insertion
__shared__ short2 gap_size_h[322]; //insertion
__shared__ int result_col;
__shared__ int result_row;
__shared__ int result_col_index;
__shared__ int result_row_index;
//__shared__ char cigar_m[128];
//__shared__ int cigar_int_m[128];
//int final_result;
//int final_i;
//int final_j;
if(threadIdx.x==0)
{
MM[0]=0;
gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2;
gap_size_h[0].x=0;
gap_size_h[0].y=0;
match=200;
mismatch=-150;
open=-260;
extend=-11;
result_col=-1000000000;//std::numeric_limits<int>::min()/2;
result_row=-1000000000;//std::numeric_limits<int>::min()/2;
// for(int i=0;i<read_reference_number.y;i++)
// printf("%c",reference_base_in_char[i]);
// printf("\n");
// for(int i=0;i<read_reference_number.x;i++)
// printf("%c",read_base_array[i]);
}
__syncthreads();
int read_number=read_reference_number.x;
{
char read_base;
read_base=read_base_array[threadIdx.x];
int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;;
int gap_size_v=0; //Deletion
int M=0; //now
int step_right; //now
   int ki=0; //insertion h negative
//deletion v
int MMM=0;
short mt=0;
short2 curmt;
curmt.x=0;
curmt.y=0;
int current_reference_id=0;
for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++)
{
int aa=j-threadIdx.x;
if( aa>=0 && (current_reference_id<read_reference_number.y))
{
     int prev_gap=M+open; //M which is calculated by the last step in the same thread
gap_v+=extend;
if(prev_gap>gap_v)
{
gap_v=prev_gap;
gap_size_v=1;
}
else
gap_size_v++;
char reference_base_each=reference_base_in_char[current_reference_id];
M=MMM+(read_base==reference_base_each? match:mismatch);
prev_gap=MM[threadIdx.x]+open;
step_right=gap_h[threadIdx.x]+extend;
if(prev_gap>step_right)
{
step_right=prev_gap;
ki=1;
}
else
ki=gap_size_h[threadIdx.x].x+1;
bool diag=(M>=gap_v)&&(M>=step_right);
curmt.y=0;
if(diag)
{
curmt.x=0;
//if(threadIdx.x==0||current_reference_id==0)
// curmt.y=0;
// else
curmt.y=mt+1;
// curBtrack=0;
}
else
if(step_right>=gap_v)
{
M=step_right;
curmt.x=0-ki;
// curBtrack=0-ki;
}
else
{
M=gap_v;
curmt.x=gap_size_v;
//curBtrack=gap_size_v;
}
MMM=MM[threadIdx.x];
mt=gap_size_h[threadIdx.x].y;
direction_index[640*j+threadIdx.x]=curmt;
//if(threadIdx.x==read_reference_number.x-3)
//printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack);
if(current_reference_id==read_reference_number.y-1)
{
if(M>=result_row)
{
result_row=M;
result_row_index=threadIdx.x; //
}
//printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x);
}
if(threadIdx.x==read_reference_number.x-1)
{
if(M>=result_col)
{
result_col=M;
result_col_index=current_reference_id; // +1
}
}
current_reference_id++;
}
    __syncthreads(); //to make sure that the former values of MM[threadIdx.x+1] are used by the other threads.
MM[threadIdx.x+1]=M;
gap_h[threadIdx.x+1]=step_right;
gap_size_h[threadIdx.x+1].x=ki;
gap_size_h[threadIdx.x+1].y=curmt.y;
    __syncthreads(); // there should be two __syncthreads(): to make sure that every MM[threadIdx.x+1] has been updated before M, D and I change.
}
}
   // char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
// __shared__ int cigar_index;
// int segment_length;
// short2 btr;
// char new_state;
// int step_length;
int4 result4;
if(threadIdx.x==read_reference_number.x-1)
{
//printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index);
    if(result_row>result_col || (result_row==result_col && (read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1)))
{
// final_result=result_row;
result4.x=read_reference_number.y-1;
result4.y=result_row_index;
result4.z=read_reference_number.x-1-result_row_index;
}
else
{
// final_result=result_col;
result4.x=result_col_index;
result4.y=read_reference_number.x-1;
result4.z=0;
}
//result[offset*3]=final_result;
//printf("%d\n",final_result);
//result4.x=fina_i;
//result4.y=fina_j;
//result4.z=segment_length;
result[offset]=result4;
}
__syncthreads();
offset+=gridDim.x;
}
}
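// Kernel 2: thread 0 of each block walks the backtracking entries from the end
// cell recorded in `result`, emitting CIGAR operations (S/M/I/D) in reverse;
// the block then writes them to `cigar`/`cigar_int` in forward order.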
__global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result
{
int offset=blockIdx.x;
 int4 result4;
short2 * direction_index;
__shared__ char * cigar_store;
__shared__ int *cigar_int_store;
__shared__ char cigar_m[128];
__shared__ int cigar_int_m[128];
while(offset<size)
{
  char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
__shared__ int cigar_index;
int segment_length;
short2 btr;
char new_state;
int step_length;
if( threadIdx.x==0)
{
result4=result[offset];
direction_index=(short2 *) (direction+offset*640*1100);
cigar_store=(char *) (cigar+offset*sizeof(char)*128);
cigar_int_store=(int *) (cigar_int+offset*128);
//printf("\n %d %d\n", final_i,final_j);
cigar_index=0;
if(result4.z>0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.z;
cigar_index++;
}
segment_length=0;
state='N';
do
{
btr=direction_index[(result4.x+result4.y)*640+result4.y];
if(btr.x>0)
{
new_state='D';
step_length=btr.x;
result4.x-=step_length;
}
else
if(btr.x<0)
{
new_state='I';
step_length=0-btr.x;
result4.y-=step_length;
}
else
{
new_state='M';
step_length=btr.y;
result4.x-=step_length;
result4.y-=step_length;
}
if(state=='N') state=new_state;
if(state==new_state)
{
segment_length+=step_length;
}
else
{
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
segment_length=step_length;
cigar_index++;
state=new_state;
}
}while(result4.x>=0&&result4.y>=0);
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
cigar_index++;
if(result4.y>=0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.y+1;
cigar_index++;
}
result4.z=result4.x+1;
result4.w=cigar_index;
result[offset]=result4;
/* for(int i=cigar_index-1;i>=0;i--)
{
printf("%d%c",cigar_int_m[i],cigar_m[i]);
}
*/
}
__syncthreads();
if(threadIdx.x<cigar_index && cigar_index<=blockDim.x)
{
// if(threadIdx.x==0)
// printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]);
cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x];
cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x];
// if(threadIdx.x==0)
// printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]);
}
offset+=gridDim.x;
}
}
struct InputData
{
char read_base[600];
char reference_base[600];
};
int main(int argc, char* args[])
{
int total_size=0;
FILE * file;
file=fopen(args[1],"r");
int size;
double computation_time=0;//total_time=0;
timespec start,finish;
/* char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[1]);
strcpy(inputdata[index].read_base,data[1]);
index++;
}
*/
/* fscanf(file,"%d",&size);
while(!feof(file))
{
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<size;i++)
{
fscanf(file,"%s ",inputdata[i].reference_base);
fscanf(file,"%s ",inputdata[i].read_base);
}
*/
char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[i]);
strcpy(inputdata[index].read_base,data[j]);
index++;
}
//data preparation.
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128);
NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total);
 char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // thus we do not need to worry about alignment
int data_size=0;
char * data_d_total;
cudaMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4);
int * result_h=(int *) malloc(sizeof(int)*size*4);
char * cigar_h=(char *) malloc(sizeof(char)*size*128);
int * cigar_int_h=(int *) malloc(sizeof(int)*size*128);
for(int i=0;i<size;i++)
{
char4 reference_tep[150];
int read_len=strlen(inputdata[i].read_base);
int ref_len=strlen(inputdata[i].reference_base);
int new_len=(ref_len+4-1)/4;
total_size+=ref_len*read_len;
for(int j=0;j<new_len;j++)
{
reference_tep[j].x=inputdata[i].reference_base[j*4];
if(j*4+1<ref_len)
reference_tep[j].y=inputdata[i].reference_base[j*4+1];
if(j*4+2<ref_len)
reference_tep[j].z=inputdata[i].reference_base[j*4+2];
if(j*4+3<ref_len)
reference_tep[j].w=inputdata[i].reference_base[j*4+3];
}
data_num_add[i].read_reference_number.x=read_len;
data_num_add[i].read_reference_number.y=ref_len;
data_num_add[i].address_array=data_size;
memcpy(data_h,inputdata[i].read_base,read_len);
data_h+=(read_len+128-1)/128*128;
data_size+=(read_len+128-1)/128*128;
memcpy(data_h,reference_tep,sizeof(char4)* new_len);
data_h+=(new_len*sizeof(char4)+127)/128*128;
data_size+=(new_len*sizeof(char4)+127)/128*128;
}
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
cudaMemcpy(data_d_total,data_h_total,data_size_to_copy,cudaMemcpyHostToDevice);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total);
char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128;
int4 * result_d=(int4 *) (data_d_total+data_size_to_copy);
char * cigar;
cudaMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int)));
int * cigar_int=(int *) (cigar+size*128*sizeof(char));
int * direction;
cudaMalloc( (int **) & direction, size * (640*1100* sizeof (int)));
dim3 block(320);
dim3 grid(size);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
calculate_cigar<<<grid,block>>> (size,data_d,num_add_d,result_d,direction); //result
// calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result
cudaMemcpy(result_h,result_d,size*sizeof(int)*4,cudaMemcpyDeviceToHost);
cudaMemcpy(cigar_h,cigar,128*sizeof(char)*size, cudaMemcpyDeviceToHost);
cudaMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
/* for(int i=0;i<size;i++)
{
printf("%d\n",result_h[i*4+1]);
printf("[");
for(int j=0;j<result_h[i*4+3];j++)
{
if(j!=0) printf(", ");
printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]);
}
printf("]\n");
}
*/
cudaFree(direction);
free(data_h_total);
cudaFree(data_d_total);
free(inputdata);
cudaFree(cigar);
free(cigar_int_h);
free(cigar_h);
// fscanf(file,"%d",&size);
}
// printf(" computation_time= %e total_time=%e \n",computation_time,0);
printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000);
return 0;
}
|
ba2586e4cdf5734bc7f071e053ebbeda05a66ab9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
}
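// Backward pass of the weighted sum c = s*a + (1-s)*b: accumulates the gradients
// of a, b and the mixing weight s from the upstream gradient dc.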
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
if(da) da[i] += dc[i] * s[i];
db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * a[i] + dc[i] * -b[i];
}
}
|
ba2586e4cdf5734bc7f071e053ebbeda05a66ab9.cu
|
#include "includes.h"
extern "C" {
}
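// Backward pass of the weighted sum c = s*a + (1-s)*b: accumulates the gradients
// of a, b and the mixing weight s from the upstream gradient dc.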
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
if(da) da[i] += dc[i] * s[i];
db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * a[i] + dc[i] * -b[i];
}
}
|
b6105f9fd9ad38ae800f003255ae187b836e3e56.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "imageTransform.h"
#include "helper_math.h"
__device__ float blurKernel[5] = { 0.0219296448f, 0.2285121468f, 0.4991164165f, 0.2285121468f, 0.0219296448f };
const int kernelDia = 5;
const int kernelDia_Small = kernelDia / 2 + 1;
float getKernel(float* kernel, float sigma, int diameter){
float sum = 0.0f;
for (int y = 0; y < diameter; y++){
int b = y - diameter / 2;
float val = expf(-(b*b) / (2 * sigma*sigma));
kernel[y] = val;
sum += val;
}
return sum;
}
void getNormalizedKernel(float* kernel, float sigma, int diameter){
float sum = getKernel(kernel, sigma, diameter);
for (int i = 0; i < diameter; i++){
kernel[i] /= sum;
}
}
__global__ void downsample(float* in_big, float* out_small, int w, int h, int w_small, int h_small) {
// indices for low resolution image access
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int c = threadIdx.z;
// return if pixel outside image
if (x >= w_small || y >= h_small) {
return;
}
// borders of area in high resolution image covered by low resolution pixel
float x_min = static_cast<float>(x)* w / w_small;
float x_max = static_cast<float>(x + 1) * w / w_small;
float y_min = static_cast<float>(y)* h / h_small;
float y_max = static_cast<float>(y + 1) * h / h_small;
// border pixel overlap in y direction
float y_overlap_top = static_cast<int>(y_min)+1 - y_min;
float y_overlap_bottom = y_max - static_cast<int>(y_max);
float acc = 0.0f;
for (int xl = static_cast<int>(x_min); xl < static_cast<int>(x_max)+1; xl++) {
float xarea = fminf(xl + 1, x_max) - fmaxf(xl, x_min);
// add pixel overlap in y direction to accumulator
acc += xarea * y_overlap_top * in_big[xl + static_cast<int>(y_min)* w + c * w * h];
acc += xarea * y_overlap_bottom * in_big[xl + static_cast<int>(y_max)* w + c * w * h];
// handle complete inner pixels in y direction
for (int yl = static_cast<int>(y_min)+1; yl < static_cast<int>(y_max); yl++) {
acc += xarea * in_big[xl + yl * w + c * w * h];
}
}
// write normalized pixel color to small output image
int ind_small = x + y * w_small + c * w_small * h_small;
out_small[ind_small] = acc / ((x_max - x_min) * (y_max - y_min));
}
__global__ void upsample(float* in_small, float* out_big, int w, int h, int w_small, int h_small) {
// indices for high resolution image access
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int c = threadIdx.z;
// return if pixel outside image
if (x >= w || y >= h) {
return;
}
// pixel coordinates in small images
int x_small = x * w_small / w;
int y_small = y * h_small / h;
// distance of x to vertical border in small image corresponding to x+1
float x_border_diff = ((x + 1) * w_small / w) * 1.0f * w / w_small - x;
// distance of y to horizontal border in small image corresponding to y+1
float y_border_diff = ((y + 1) * h_small / h) * 1.0f * h / h_small - y;
// indices to access pixel
int ind_big = x + y * w + c * w * h;
int ind_small = x_small + y_small * w_small + c * w_small * h_small;
// accumulate color contribution
float acc;
if (x_border_diff > 0) {
if (y_border_diff > 0) {
// high resolution pixel crossed by horizontal and vertical pixel border in small image
acc = x_border_diff*y_border_diff*in_small[ind_small] + (1 - x_border_diff)*y_border_diff*in_small[ind_small + 1]
+ x_border_diff*(1 - y_border_diff)*in_small[ind_small + w_small] + (1 - x_border_diff)*(1 - y_border_diff)*in_small[ind_small + w_small + 1];
} else {
// high resolution pixel crossed by vertical pixel border in small image
acc = x_border_diff*in_small[ind_small] + (1 - x_border_diff)*in_small[ind_small + 1];
}
} else {
if (y_border_diff > 0) {
// high resolution pixel crossed by horizontal pixel border in small image
acc = y_border_diff*in_small[ind_small] + (1 - y_border_diff)*in_small[ind_small + w_small];
} else {
// high resolution pixel totally inside pixel of small image
acc = in_small[ind_small];
}
}
out_big[ind_big] = (acc * w_small * h_small) / (w * h);
}
// Kernel to sample the input images up for initialization of the u_i
__global__ void initialUpsample(float* in, float* out, int w, int h, int w_small, int h_small) {
// indices for image access
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int c = threadIdx.z;
// return if pixel outside image
if (x >= w || y >= h) {
return;
}
// get pixel coordinates of input image
int x_small = x * w_small / w;
int y_small = y * h_small / h;
// get pixel value of input image
out[x + y * w + c * w * h] = in[x_small + y_small * w_small + c * w_small * h_small];
}
__device__ float d_downsample(float* in, int x_small, int y_small, int c, int w_small, int h_small) {
int x_big = x_small << 1;
int y_big = y_small << 1;
int w_big = w_small << 1;
int h_big = h_small << 1;
float result = 0.0f;
for (int i = 0; i < kernelDia; i++) {
float sum = 0.0f;
for (int j = 0; j < kernelDia; j++) {
int valIdx_X = clamp(x_big + j - kernelDia / 2, 0, w_big - 1);
int valIdx_Y = clamp(y_big + i - kernelDia / 2, 0, h_big - 1);
sum += blurKernel[j] * in[valIdx_X + valIdx_Y * w_big + w_big * h_big * c];
}
result += sum * blurKernel[i];
}
return result;
}
__device__ float d_upsample(float* in, int x_big, int y_big, int c, int w_big, int h_big) {
// coordinates and size in the small image
int x_small = x_big >> 1;
int y_small = y_big >> 1;
int w_small = w_big >> 1;
int h_small = h_big >> 1;
float result = 0.0f;
// Offset value, zero for left pixel, one for right pixels
int offsX = x_big % 2;
// Offset value, zero for top pixels, one for bottom pixels
int offsY = y_big % 2;
for (int i = 0; i < kernelDia_Small; i++) {
float sum = 0.0f;
int valIdx_Y = clamp(y_small + i - kernelDia_Small / 2, 0, h_small - 1);
for (int j = 0; j < kernelDia_Small; j++) {
// only get values inside the image
int valIdx_X = clamp(x_small + j - kernelDia_Small / 2, 0, w_small - 1);
float val = 0.25f * in[valIdx_X + valIdx_Y * w_small + w_small * h_small * c];
// ignore most left value if a left pixel is evaluated
if (!offsX || j > 0) {
sum += blurKernel[2 * j - offsX] * val;
}
// ignore most right value if a right pixel is evaluated
if (offsX || j < kernelDia_Small - 1) {
sum += blurKernel[2 * j + 1 - offsX] * val;
}
}
// horizontal smooth
// ignore most top value if a bottom pixel is evaluated
if (!offsY || i > 0) {
result += blurKernel[2 * i - offsY] * sum;
}
// ignore most bottom value if a top pixel is evaluated
if (offsY || i < kernelDia_Small - 1) {
result += blurKernel[2 * i + 1 - offsY] * sum;
}
}
return result;
}
// For a 1 dimensional 5 pixel gaussian blur kernel with sigma = 1.2 you have the weights [GK5_2, GK5_1, GK5_0, GK5_1, GK5_2]
// sigma = 1.2
//#define GK5_0 0.3434064786f
//#define GK5_1 0.2426675967f
//#define GK5_2 0.0856291639f
// sigma = 0.8
//#define GK5_0 0.4991164165f
//#define GK5_1 0.2285121468f
//#define GK5_2 0.0219296448f
// sigma = 0.6
#define GK5_0 0.6638183293f
#define GK5_1 0.1655245666f
#define GK5_2 0.0025662686f
__global__ void gaussBlur5(float* in, float* out, int w, int h) {
// shared memory for optimized memory access
extern __shared__ float sdata[];
// indices for image access
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int c = threadIdx.z;
int wb = blockDim.x + 4;
int sindex = threadIdx.x + 2 + (threadIdx.y + 2) * wb + wb * (blockDim.y + 4) * c;
int index = x + y * w + c * w * h;
bool realPixel = (x < w && y < h);
// fill shared memory (area covered by this block + 2 pixel of additional border)
float accum;
for (int si = threadIdx.x + threadIdx.y * blockDim.x + c * blockDim.x * blockDim.y; si < wb*(blockDim.y + 4)*blockDim.z; si += blockDim.x * blockDim.y * blockDim.z) {
int inX = min(w - 1, max(0, blockIdx.x * blockDim.x - 2 + (si % wb)));
int inY = min(h - 1, max(0, blockIdx.y * blockDim.y - 2 + ((si / wb) % (blockDim.y + 4))));
int inZ = si / (wb*(blockDim.y + 4));
accum = in[inX + inY * w + inZ * w * h];
sdata[si] = accum;
}
// wait until all threads have stored the image data
__syncthreads();
float accum2;
if (realPixel) {
// blur horizontally
accum = sdata[sindex - 2] * GK5_2 + sdata[sindex - 1] * GK5_1 + sdata[sindex] * GK5_0 + sdata[sindex + 1] * GK5_1 + sdata[sindex + 2] * GK5_2;
// for the subsequent vertical blur two additional lines at top and bottom of the block have to be blurred as well
if (threadIdx.y <= 1 || threadIdx.y >= blockDim.y - 2) {
int shiftIndex = sindex + (threadIdx.y > 1 ? 2 : -2) * wb;
accum2 = sdata[shiftIndex - 2] * GK5_2 + sdata[shiftIndex - 1] * GK5_1 + sdata[shiftIndex] * GK5_0 + sdata[shiftIndex + 1] * GK5_1 + sdata[shiftIndex + 2] * GK5_2;
}
}
// wait until all threads have computed the horizontal blur
__syncthreads();
if (realPixel) {
// store blurred pixels into shared memory
sdata[sindex] = accum;
if (threadIdx.y <= 1 || threadIdx.y >= blockDim.y - 2) {
sdata[sindex + (threadIdx.y > 1 ? 2 : -2) * wb] = accum2;
}
}
// wait until all threads have stored the horizontally blurred pixel values
__syncthreads();
if (realPixel) {
// blur vertically
accum = sdata[sindex - 2 * wb] * GK5_2 + sdata[sindex - wb] * GK5_1 + sdata[sindex] * GK5_0 + sdata[sindex + wb] * GK5_1 + sdata[sindex + 2 * wb] * GK5_2;
// store result in output image
out[index] = accum;
}
}
__global__ void blur(float *in, float *out, int w, int h, float kernelDia){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int c = threadIdx.z;
int idx = x + y * w + c * w * h;
int radius = kernelDia / 2;
extern __shared__ float sh_data[];
int sharedSizeX = blockDim.x + kernelDia - 1;
int sharedSizeY = blockDim.y + kernelDia - 1;
int shIdxX = threadIdx.x + radius;
int shIdxY = threadIdx.y + radius;
int shIdx = shIdxX + shIdxY * sharedSizeX;
int thIdx = threadIdx.x + threadIdx.y * blockDim.x;
int x0 = blockDim.x * blockIdx.x;
int y0 = blockDim.y * blockIdx.y;
int x0s = x0 - radius;
int y0s = y0 - radius;
bool isImgPixel = x < w && y < h;
for (int sidx = thIdx; sidx < sharedSizeX*sharedSizeY; sidx += blockDim.x*blockDim.y) {
int ix = clamp(x0s + sidx % sharedSizeX, 0, w - 1);
int iy = clamp(y0s + sidx / sharedSizeX, 0, h - 1);
sh_data[sidx] = in[ix + w * iy + c * w * h];
}
__syncthreads();
// horizontal smooth
float sum1 = 0.0f;
float sum2 = 0.0f;
if (isImgPixel) {
for (int i = 0; i < kernelDia; i++){
sum1 += blurKernel[i] * sh_data[shIdx + i - radius];
}
    // border rows also pre-blur the vertical halo rows needed by the vertical pass
    if (threadIdx.y < radius || threadIdx.y >= blockDim.y - radius) {
int shiftIndex = shIdx + (threadIdx.y >= radius ? radius : -radius) * sharedSizeX;
for (int i = 0; i < kernelDia; i++) {
        sum2 += blurKernel[i] * sh_data[shiftIndex + i - radius];
}
}
}
__syncthreads();
if (isImgPixel) {
// store blurred pixels into shared memory
sh_data[shIdx] = sum1;
    if (threadIdx.y < radius || threadIdx.y >= blockDim.y - radius) {
sh_data[shIdx + (threadIdx.y >= radius ? radius : -radius) * sharedSizeX] = sum2;
}
}
__syncthreads();
// vertical smooth
if (isImgPixel) {
float sum = 0.0f;
for (int i = 0; i < kernelDia; i++){
sum += blurKernel[i] * sh_data[shIdxX + (shIdxY + i - radius) * sharedSizeX];
}
out[idx] = sum;
}
}
|
b6105f9fd9ad38ae800f003255ae187b836e3e56.cu
|
#include "imageTransform.h"
#include "helper_math.h"
__device__ float blurKernel[5] = { 0.0219296448f, 0.2285121468f, 0.4991164165f, 0.2285121468f, 0.0219296448f };
const int kernelDia = 5;
const int kernelDia_Small = kernelDia / 2 + 1;
float getKernel(float* kernel, float sigma, int diameter){
float sum = 0.0f;
for (int y = 0; y < diameter; y++){
int b = y - diameter / 2;
float val = expf(-(b*b) / (2 * sigma*sigma));
kernel[y] = val;
sum += val;
}
return sum;
}
void getNormalizedKernel(float* kernel, float sigma, int diameter){
float sum = getKernel(kernel, sigma, diameter);
for (int i = 0; i < diameter; i++){
kernel[i] /= sum;
}
}
__global__ void downsample(float* in_big, float* out_small, int w, int h, int w_small, int h_small) {
// indices for low resolution image access
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int c = threadIdx.z;
// return if pixel outside image
if (x >= w_small || y >= h_small) {
return;
}
// borders of area in high resolution image covered by low resolution pixel
float x_min = static_cast<float>(x)* w / w_small;
float x_max = static_cast<float>(x + 1) * w / w_small;
float y_min = static_cast<float>(y)* h / h_small;
float y_max = static_cast<float>(y + 1) * h / h_small;
// border pixel overlap in y direction
float y_overlap_top = static_cast<int>(y_min)+1 - y_min;
float y_overlap_bottom = y_max - static_cast<int>(y_max);
float acc = 0.0f;
for (int xl = static_cast<int>(x_min); xl < static_cast<int>(x_max)+1; xl++) {
float xarea = fminf(xl + 1, x_max) - fmaxf(xl, x_min);
// add pixel overlap in y direction to accumulator
acc += xarea * y_overlap_top * in_big[xl + static_cast<int>(y_min)* w + c * w * h];
acc += xarea * y_overlap_bottom * in_big[xl + static_cast<int>(y_max)* w + c * w * h];
// handle complete inner pixels in y direction
for (int yl = static_cast<int>(y_min)+1; yl < static_cast<int>(y_max); yl++) {
acc += xarea * in_big[xl + yl * w + c * w * h];
}
}
// write normalized pixel color to small output image
int ind_small = x + y * w_small + c * w_small * h_small;
out_small[ind_small] = acc / ((x_max - x_min) * (y_max - y_min));
}
__global__ void upsample(float* in_small, float* out_big, int w, int h, int w_small, int h_small) {
// indices for high resolution image access
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int c = threadIdx.z;
// return if pixel outside image
if (x >= w || y >= h) {
return;
}
// pixel coordinates in small images
int x_small = x * w_small / w;
int y_small = y * h_small / h;
// distance of x to vertical border in small image corresponding to x+1
float x_border_diff = ((x + 1) * w_small / w) * 1.0f * w / w_small - x;
// distance of y to horizontal border in small image corresponding to y+1
float y_border_diff = ((y + 1) * h_small / h) * 1.0f * h / h_small - y;
// indices to access pixel
int ind_big = x + y * w + c * w * h;
int ind_small = x_small + y_small * w_small + c * w_small * h_small;
// accumulate color contribution
float acc;
if (x_border_diff > 0) {
if (y_border_diff > 0) {
// high resolution pixel crossed by horizontal and vertical pixel border in small image
acc = x_border_diff*y_border_diff*in_small[ind_small] + (1 - x_border_diff)*y_border_diff*in_small[ind_small + 1]
+ x_border_diff*(1 - y_border_diff)*in_small[ind_small + w_small] + (1 - x_border_diff)*(1 - y_border_diff)*in_small[ind_small + w_small + 1];
} else {
// high resolution pixel crossed by vertical pixel border in small image
acc = x_border_diff*in_small[ind_small] + (1 - x_border_diff)*in_small[ind_small + 1];
}
} else {
if (y_border_diff > 0) {
// high resolution pixel crossed by horizontal pixel border in small image
acc = y_border_diff*in_small[ind_small] + (1 - y_border_diff)*in_small[ind_small + w_small];
} else {
// high resolution pixel totally inside pixel of small image
acc = in_small[ind_small];
}
}
out_big[ind_big] = (acc * w_small * h_small) / (w * h);
}
// Kernel to sample the input images up for initialization of the u_i
__global__ void initialUpsample(float* in, float* out, int w, int h, int w_small, int h_small) {
// indices for image access
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int c = threadIdx.z;
// return if pixel outside image
if (x >= w || y >= h) {
return;
}
// get pixel coordinates of input image
int x_small = x * w_small / w;
int y_small = y * h_small / h;
// get pixel value of input image
out[x + y * w + c * w * h] = in[x_small + y_small * w_small + c * w_small * h_small];
}
__device__ float d_downsample(float* in, int x_small, int y_small, int c, int w_small, int h_small) {
int x_big = x_small << 1;
int y_big = y_small << 1;
int w_big = w_small << 1;
int h_big = h_small << 1;
float result = 0.0f;
for (int i = 0; i < kernelDia; i++) {
float sum = 0.0f;
for (int j = 0; j < kernelDia; j++) {
int valIdx_X = clamp(x_big + j - kernelDia / 2, 0, w_big - 1);
int valIdx_Y = clamp(y_big + i - kernelDia / 2, 0, h_big - 1);
sum += blurKernel[j] * in[valIdx_X + valIdx_Y * w_big + w_big * h_big * c];
}
result += sum * blurKernel[i];
}
return result;
}
__device__ float d_upsample(float* in, int x_big, int y_big, int c, int w_big, int h_big) {
// coordinates and size in the small image
int x_small = x_big >> 1;
int y_small = y_big >> 1;
int w_small = w_big >> 1;
int h_small = h_big >> 1;
float result = 0.0f;
// Offset value, zero for left pixel, one for right pixels
int offsX = x_big % 2;
// Offset value, zero for top pixels, one for bottom pixels
int offsY = y_big % 2;
for (int i = 0; i < kernelDia_Small; i++) {
float sum = 0.0f;
int valIdx_Y = clamp(y_small + i - kernelDia_Small / 2, 0, h_small - 1);
for (int j = 0; j < kernelDia_Small; j++) {
// only get values inside the image
int valIdx_X = clamp(x_small + j - kernelDia_Small / 2, 0, w_small - 1);
float val = 0.25f * in[valIdx_X + valIdx_Y * w_small + w_small * h_small * c];
// ignore most left value if a left pixel is evaluated
if (!offsX || j > 0) {
sum += blurKernel[2 * j - offsX] * val;
}
// ignore most right value if a right pixel is evaluated
if (offsX || j < kernelDia_Small - 1) {
sum += blurKernel[2 * j + 1 - offsX] * val;
}
}
// horizontal smooth
// ignore most top value if a bottom pixel is evaluated
if (!offsY || i > 0) {
result += blurKernel[2 * i - offsY] * sum;
}
// ignore most bottom value if a top pixel is evaluated
if (offsY || i < kernelDia_Small - 1) {
result += blurKernel[2 * i + 1 - offsY] * sum;
}
}
return result;
}
// For a 1 dimensional 5 pixel gaussian blur kernel with sigma = 1.2 you have the weights [GK5_2, GK5_1, GK5_0, GK5_1, GK5_2]
// sigma = 1.2
//#define GK5_0 0.3434064786f
//#define GK5_1 0.2426675967f
//#define GK5_2 0.0856291639f
// sigma = 0.8
//#define GK5_0 0.4991164165f
//#define GK5_1 0.2285121468f
//#define GK5_2 0.0219296448f
// sigma = 0.6
#define GK5_0 0.6638183293f
#define GK5_1 0.1655245666f
#define GK5_2 0.0025662686f
__global__ void gaussBlur5(float* in, float* out, int w, int h) {
// shared memory for optimized memory access
extern __shared__ float sdata[];
// indices for image access
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int c = threadIdx.z;
int wb = blockDim.x + 4;
int sindex = threadIdx.x + 2 + (threadIdx.y + 2) * wb + wb * (blockDim.y + 4) * c;
int index = x + y * w + c * w * h;
bool realPixel = (x < w && y < h);
// fill shared memory (area covered by this block + 2 pixel of additional border)
float accum;
for (int si = threadIdx.x + threadIdx.y * blockDim.x + c * blockDim.x * blockDim.y; si < wb*(blockDim.y + 4)*blockDim.z; si += blockDim.x * blockDim.y * blockDim.z) {
int inX = min(w - 1, max(0, blockIdx.x * blockDim.x - 2 + (si % wb)));
int inY = min(h - 1, max(0, blockIdx.y * blockDim.y - 2 + ((si / wb) % (blockDim.y + 4))));
int inZ = si / (wb*(blockDim.y + 4));
accum = in[inX + inY * w + inZ * w * h];
sdata[si] = accum;
}
// wait until all threads have stored the image data
__syncthreads();
float accum2;
if (realPixel) {
// blur horizontally
accum = sdata[sindex - 2] * GK5_2 + sdata[sindex - 1] * GK5_1 + sdata[sindex] * GK5_0 + sdata[sindex + 1] * GK5_1 + sdata[sindex + 2] * GK5_2;
// for the subsequent vertical blur two additional lines at top and bottom of the block have to be blurred as well
if (threadIdx.y <= 1 || threadIdx.y >= blockDim.y - 2) {
int shiftIndex = sindex + (threadIdx.y > 1 ? 2 : -2) * wb;
accum2 = sdata[shiftIndex - 2] * GK5_2 + sdata[shiftIndex - 1] * GK5_1 + sdata[shiftIndex] * GK5_0 + sdata[shiftIndex + 1] * GK5_1 + sdata[shiftIndex + 2] * GK5_2;
}
}
// wait until all threads have computed the horizontal blur
__syncthreads();
if (realPixel) {
// store blurred pixels into shared memory
sdata[sindex] = accum;
if (threadIdx.y <= 1 || threadIdx.y >= blockDim.y - 2) {
sdata[sindex + (threadIdx.y > 1 ? 2 : -2) * wb] = accum2;
}
}
// wait until all threads have stored the horizontally blurred pixel values
__syncthreads();
if (realPixel) {
// blur vertically
accum = sdata[sindex - 2 * wb] * GK5_2 + sdata[sindex - wb] * GK5_1 + sdata[sindex] * GK5_0 + sdata[sindex + wb] * GK5_1 + sdata[sindex + 2 * wb] * GK5_2;
// store result in output image
out[index] = accum;
}
}
__global__ void blur(float *in, float *out, int w, int h, float kernelDia){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int c = threadIdx.z;
int idx = x + y * w + c * w * h;
int radius = kernelDia / 2;
extern __shared__ float sh_data[];
int sharedSizeX = blockDim.x + kernelDia - 1;
int sharedSizeY = blockDim.y + kernelDia - 1;
int shIdxX = threadIdx.x + radius;
int shIdxY = threadIdx.y + radius;
int shIdx = shIdxX + shIdxY * sharedSizeX;
int thIdx = threadIdx.x + threadIdx.y * blockDim.x;
int x0 = blockDim.x * blockIdx.x;
int y0 = blockDim.y * blockIdx.y;
int x0s = x0 - radius;
int y0s = y0 - radius;
bool isImgPixel = x < w && y < h;
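// note: sh_data holds a single tile with no per-channel offset, so this kernel assumes
// blockDim.z == 1 (one channel per launch); with several z-threads the channels would
// overwrite each other's shared data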
for (int sidx = thIdx; sidx < sharedSizeX*sharedSizeY; sidx += blockDim.x*blockDim.y) {
int ix = clamp(x0s + sidx % sharedSizeX, 0, w - 1);
int iy = clamp(y0s + sidx / sharedSizeX, 0, h - 1);
sh_data[sidx] = in[ix + w * iy + c * w * h];
}
__syncthreads();
// horizontal smooth
float sum1 = 0.0f;
float sum2 = 0.0f;
if (isImgPixel) {
for (int i = 0; i < kernelDia; i++){
sum1 += blurKernel[i] * sh_data[shIdx + i - radius];
}
// threads in the top and bottom border rows also blur one halo row (shifted by
// +/- radius) so the vertical pass below has horizontally blurred data available
if (threadIdx.y < radius || threadIdx.y >= blockDim.y - radius) {
int shiftIndex = shIdx + (threadIdx.y >= radius ? radius : -radius) * sharedSizeX;
for (int i = 0; i < kernelDia; i++) {
sum2 += blurKernel[i] * sh_data[shiftIndex + i - radius];
}
}
}
__syncthreads();
if (isImgPixel) {
// store blurred pixels into shared memory
sh_data[shIdx] = sum1;
if (threadIdx.y < radius || threadIdx.y >= blockDim.y - radius) {
sh_data[shIdx + (threadIdx.y >= radius ? radius : -radius) * sharedSizeX] = sum2;
}
}
__syncthreads();
// vertical smooth
if (isImgPixel) {
float sum = 0.0f;
for (int i = 0; i < kernelDia; i++){
sum += blurKernel[i] * sh_data[shIdxX + (shIdxY + i - radius) * sharedSizeX];
}
out[idx] = sum;
}
}
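// Minimal host-side launch sketch for the blur kernel above (illustrative only; the
// actual caller is not shown in this file). It assumes the weights for the chosen
// diameter were already written to the blurKernel array the kernels read (declared
// earlier in this file), and it processes one channel per call so the single-tile
// shared memory layout holds. CUDA-style launch syntax is shown; a HIP build would
// use hipLaunchKernelGGL instead.
/*
void launchBlur(float *d_in, float *d_out, int w, int h, int nc, int kernelDia)
{
dim3 block(32, 8, 1);
dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, 1);
size_t smBytes = (block.x + kernelDia - 1) * (block.y + kernelDia - 1) * sizeof(float);
for (int ch = 0; ch < nc; ch++) {
blur<<<grid, block, smBytes>>>(d_in + (size_t)ch * w * h, d_out + (size_t)ch * w * h, w, h, (float)kernelDia);
}
}
*/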
|
9b5764aa46aa83e3513bf21e03db9cb8d5dc50e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#include <string>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 5660800
// #define GLOBAL_MEM_ELEMENTS 566080
// #define GLOBAL_MEM_ELEMENTS 19660800
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
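// tid 0 partitions the array into one chunk per warp and links each chunk into a
// pointer chain: entry i points at entry (i + 48) % elements_per_warp of the same
// chunk, and the chain is then flattened into `array` for shared_latency to chase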
if (tid == 0) {
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
// index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
// int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
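// only the first `divergence` lanes of each warp execute the pointer chase below,
// so the <threads active per warp> argument controls intra-warp divergence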
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage: ./binary <num_blocks> <num_threads_per_block> <iterations> <threads active per warp> <stride>" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
9b5764aa46aa83e3513bf21e03db9cb8d5dc50e5.cu
|
#include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 5660800
// #define GLOBAL_MEM_ELEMENTS 566080
// #define GLOBAL_MEM_ELEMENTS 19660800
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
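// tid 0 partitions the array into one chunk per warp and links each chunk into a
// pointer chain: entry i points at entry (i + 48) % elements_per_warp of the same
// chunk, and the chain is then flattened into `array` for shared_latency to chase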
if (tid == 0) {
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
// index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
// int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
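// only the first `divergence` lanes of each warp execute the pointer chase below,
// so the <threads active per warp> argument controls intra-warp divergence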
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage: ./binary <num_blocks> <num_threads_per_block> <iterations> <threads active per warp> <stride>" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
c1e55a2d235da683e9204b1dcaea158042bc6824.hip
|
// !!! This is a file automatically generated by hipify!!!
#include<curd_lib_host.h>
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
Parallel reduction
This sample shows how to perform a reduction operation on an array of values
to produce a single value in a single kernel (as opposed to two or more
kernel calls as shown in the "reduction" CUDA Sample). Single-pass
reduction requires global atomic instructions (Compute Capability 1.1 or
later) and the __threadfence() intrinsic (CUDA 2.2 or later).
Reductions are a very common computation in parallel algorithms. Any time
an array of values needs to be reduced to a single value using a binary
associative operator, a reduction can be used. Example applications include
statistics computations such as mean and standard deviation, and image
processing applications such as finding the total luminance of an
image.
This code performs sum reductions, but any associative operator such as
min() or max() could also be used.
It assumes the input size is a power of 2.
COMMAND LINE ARGUMENTS
"--shmoo": Test performance for 1 to 32M elements with each of the 7 different kernels
"--n=<N>": Specify the number of elements to reduce (default 1048576)
"--threads=<N>": Specify the number of threads per block (default 128)
"--maxblocks=<N>": Specify the maximum number of thread blocks to launch (kernel 6 only, default 64)
"--cpufinal": Read back the per-block results and do final sum of block sums on CPU (default false)
"--cputhresh=<N>": The threshold of number of blocks sums below which to perform a CPU final reduction (default 1)
"--multipass": Use a multipass reduction instead of a single-pass reduction
*/
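// Illustrative sketch of the single-pass "last block" pattern this sample relies on
// (the real kernels live in threadFenceReduction_kernel.cuh; the names and the
// one-element-per-thread loading below are simplified assumptions, kept commented
// out so they cannot collide with the definitions in that header).
/*
__device__ unsigned int sketchRetirementCount = 0;
__global__ void reduceSinglePassSketch(const float *in, float *blockSums, float *out, int n)
{
extern __shared__ float s[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + tid;
s[tid] = (i < n) ? in[i] : 0.0f;
__syncthreads();
// standard shared-memory tree reduction within the block
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) s[tid] += s[tid + stride];
__syncthreads();
}
if (tid == 0) blockSums[blockIdx.x] = s[0];
// make this block's partial sum visible to the whole device before signalling
__threadfence();
__shared__ bool amLast;
if (tid == 0) amLast = (atomicInc(&sketchRetirementCount, gridDim.x) == gridDim.x - 1);
__syncthreads();
if (amLast && tid == 0) {
// the last block to retire adds up all partial sums
float total = 0.0f;
for (int b = 0; b < gridDim.x; b++) total += blockSums[b];
*out = total;
sketchRetirementCount = 0;
}
}
*/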
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <helper_functions.h>
#include <helper_cuda.h>
#include "../../benchmark.h"
#define VERSION_MAJOR (CUDART_VERSION/1000)
#define VERSION_MINOR (CUDART_VERSION%100)/10
const char *sSDKsample = "threadFenceReduction";
#if CUDART_VERSION >= 2020
#include "threadFenceReduction_kernel.cuh"
#else
#pragma comment(user, "CUDA 2.2 is required to build for threadFenceReduction")
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
bool runTest(int argc, char **argv);
extern "C"
{
void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata);
void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata);
}
#if CUDART_VERSION < 2020
void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata)
{
printf("reduce(), compiler not supported, aborting tests\n");
}
void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata)
{
printf("reduceSinglePass(), compiler not supported, aborting tests\n");
}
#endif
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
hipDeviceProp_t deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev;
// printf("%s Starting...\n\n", sSDKsample);
/*dev = findCudaDevice(argc, (const char **)argv);
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));*/
// printf("GPU Device supports SM %d.%d compute capability\n\n", deviceProp.major, deviceProp.minor);
bool bTestResult = false;
#if CUDART_VERSION >= 2020
bTestResult = runTest(argc, argv);
#else
print_NVCC_min_spec(sSDKsample, "2.2", "Version 185");
exit(EXIT_SUCCESS);
#endif
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
////////////////////////////////////////////////////////////////////////////////
//! Compute sum reduction on CPU
//! We use Kahan summation for an accurate sum of large arrays.
//! http://en.wikipedia.org/wiki/Kahan_summation_algorithm
//!
//! @param data pointer to input data
//! @param size number of input data elements
////////////////////////////////////////////////////////////////////////////////
template<class T>
T reduceCPU(T *data, int size)
{
T sum = data[0];
T c = (T)0.0;
for (int i = 1; i < size; i++)
{
T y = data[i] - c;
T t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
////////////////////////////////////////////////////////////////////////////////
// Compute the number of threads and blocks to use for the reduction
// We set threads / block to the minimum of maxThreads and n/2.
////////////////////////////////////////////////////////////////////////////////
void getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (n == 1)
{
threads = 1;
blocks = 1;
}
else
{
threads = (n < maxThreads*2) ? nextPow2(n / 2) : maxThreads;
blocks = max(1, n / (threads * 2));
}
blocks = min(maxBlocks, blocks);
}
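// Example: with the defaults size = 1<<20, maxThreads = 128 and maxBlocks = 64 this
// yields threads = 128 and blocks = min(64, 1048576 / 256) = 64.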
////////////////////////////////////////////////////////////////////////////////
// This function performs a reduction of the input data multiple times and
// measures the average reduction time.
////////////////////////////////////////////////////////////////////////////////
float benchmarkReduce(int n,
int numThreads,
int numBlocks,
int maxThreads,
int maxBlocks,
int testIterations,
bool multiPass,
bool cpuFinalReduction,
int cpuFinalThreshold,
StopWatchInterface *timer,
float *h_odata,
float *d_idata,
float *d_odata)
{
float gpu_result = 0;
bool bNeedReadback = true;
hipError_t error;
for (int i = 0; i < testIterations; ++i)
{
gpu_result = 0;
unsigned int retCnt = 0;
error = setRetirementCount(retCnt);
checkCudaErrors(error);
hipDeviceSynchronize();
sdkStartTimer(&timer);
if (multiPass)
{
// execute the kernel
BENCHMARK.start_kernel();
reduce(n, numThreads, numBlocks, d_idata, d_odata);
BENCHMARK.end_kernel();
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
if (cpuFinalReduction)
{
// sum partial sums from each block on CPU
// copy result from device to host
error = hipMemcpy(h_odata, d_odata, numBlocks*sizeof(float), hipMemcpyDeviceToHost);
checkCudaErrors(error);
for (int i=0; i<numBlocks; i++)
{
gpu_result += h_odata[i];
}
bNeedReadback = false;
}
else
{
// sum partial block sums on GPU
int s=numBlocks;
while (s > cpuFinalThreshold)
{
int threads = 0, blocks = 0;
getNumBlocksAndThreads(s, maxBlocks, maxThreads, blocks, threads);
reduce(s, threads, blocks, d_odata, d_odata);
s = s / (threads*2);
}
if (s > 1)
{
// copy result from device to host
error = hipMemcpy(h_odata, d_odata, s * sizeof(float), hipMemcpyDeviceToHost);
checkCudaErrors(error);
for (int i=0; i < s; i++)
{
gpu_result += h_odata[i];
}
bNeedReadback = false;
}
}
}
else
{
getLastCudaError("Kernel execution failed");
// execute the kernel
BENCHMARK.start_kernel();
reduceSinglePass(n, numThreads, numBlocks, d_idata, d_odata);
BENCHMARK.end_kernel();
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
}
hipDeviceSynchronize();
sdkStopTimer(&timer);
}
if (bNeedReadback)
{
// copy final sum from device to host
error = hipMemcpy(&gpu_result, d_odata, sizeof(float), hipMemcpyDeviceToHost);
checkCudaErrors(error);
}
return gpu_result;
}
////////////////////////////////////////////////////////////////////////////////
// This function calls benchmarkReduce multiple times for a range of array sizes
// and prints a report in CSV (comma-separated value) format that can be used for
// generating a "shmoo" plot showing the performance for each kernel variation
// over a wide range of input sizes.
////////////////////////////////////////////////////////////////////////////////
void shmoo(int minN, int maxN, int maxThreads, int maxBlocks)
{
// create random input data on CPU
unsigned int bytes = maxN * sizeof(float);
float *h_idata = (float *) malloc(bytes);
for (int i = 0; i < maxN; i++)
{
// Keep the numbers small so we don't get truncation error in the sum
h_idata[i] = (rand() & 0xFF) / (float)RAND_MAX;
}
int maxNumBlocks = min(65535, maxN / maxThreads);
// allocate mem for the result on host side
float *h_odata = (float *) malloc(maxNumBlocks*sizeof(float));
// allocate device memory and data
float *d_idata = NULL;
float *d_odata = NULL;
checkCudaErrors(hipMalloc((void **) &d_idata, bytes));
checkCudaErrors(hipMalloc((void **) &d_odata, maxNumBlocks*sizeof(float)));
// copy data directly to device memory
checkCudaErrors(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_odata, h_idata, maxNumBlocks*sizeof(float), hipMemcpyHostToDevice));
// warm-up
reduce(maxN, maxThreads, maxNumBlocks, d_idata, d_odata);
int testIterations = 100;
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
// print headers
//printf("N, %d blocks one pass, %d blocks multipass\n", maxBlocks, maxBlocks);
for (int i = minN; i <= maxN; i *= 2)
{
printf("%d, ", i);
for (int multiPass = 0; multiPass <= 1; multiPass++)
{
sdkResetTimer(&timer);
int numBlocks = 0;
int numThreads = 0;
getNumBlocksAndThreads(i, maxBlocks, maxThreads, numBlocks, numThreads);
benchmarkReduce(i, numThreads, numBlocks, maxThreads, maxBlocks,
testIterations, multiPass==1, false, 1, timer, h_odata, d_idata, d_odata);
float reduceTime = sdkGetAverageTimerValue(&timer);
printf("%f%s", reduceTime, multiPass==0 ? ", " : "\n");
}
}
printf("\n");
// cleanup
sdkDeleteTimer(&timer);
free(h_idata);
free(h_odata);
hipFree(d_idata);
hipFree(d_odata);
}
////////////////////////////////////////////////////////////////////////////////
// The main function which runs the reduction test.
////////////////////////////////////////////////////////////////////////////////
bool
runTest(int argc, char **argv)
{
int size = 1<<20; // number of elements to reduce
int maxThreads = 128; // number of threads per block
int maxBlocks = 64;
bool cpuFinalReduction = false;
int cpuFinalThreshold = 1;
bool multipass = false;
bool bTestResult = false;
if (checkCmdLineFlag(argc, (const char **) argv, "b"))
{
size = getCmdLineArgumentInt(argc, (const char **)argv, "b");
}
if (checkCmdLineFlag(argc, (const char **) argv, "threads"))
{
maxThreads = getCmdLineArgumentInt(argc, (const char **)argv, "threads");
}
if (checkCmdLineFlag(argc, (const char **) argv, "maxblocks"))
{
maxBlocks = getCmdLineArgumentInt(argc, (const char **)argv, "maxblocks");
}
// printf("%d elements\n", size);
// printf("%d threads (max)\n", maxThreads);
cpuFinalReduction = checkCmdLineFlag(argc, (const char **) argv, "cpufinal");
multipass = checkCmdLineFlag(argc, (const char **) argv, "multipass");
if (checkCmdLineFlag(argc, (const char **) argv, "cputhresh"))
{
cpuFinalThreshold = getCmdLineArgumentInt(argc, (const char **) argv, "cputhresh");
}
bool runShmoo = checkCmdLineFlag(argc, (const char **) argv, "shmoo");
if (runShmoo)
{
shmoo(1, 33554432, maxThreads, maxBlocks);
}
else
{
// create random input data on CPU
unsigned int bytes = size * sizeof(float);
float *h_idata = (float *) malloc(bytes);
for (int i=0; i<size; i++)
{
// Keep the numbers small so we don't get truncation error in the sum
h_idata[i] = (rand() & 0xFF) / (float)RAND_MAX;
}
int numBlocks = 0;
int numThreads = 0;
getNumBlocksAndThreads(size, maxBlocks, maxThreads, numBlocks, numThreads);
if (numBlocks == 1)
{
cpuFinalThreshold = 1;
}
// allocate mem for the result on host side
float *h_odata = (float *) malloc(numBlocks*sizeof(float));
//printf("%d blocks\n", numBlocks);
// allocate device memory and data
float *d_idata = NULL;
float *d_odata = NULL;
BENCHMARK.start_total();
checkCudaErrors(hipMalloc((void **) &d_idata, bytes));
checkCudaErrors(hipMalloc((void **) &d_odata, numBlocks*sizeof(float)));
// copy data directly to device memory
checkCudaErrors(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_odata, h_idata, numBlocks*sizeof(float), hipMemcpyHostToDevice));
// warm-up
reduce(size, numThreads, numBlocks, d_idata, d_odata);
int testIterations = 100;
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
float gpu_result = 0;
gpu_result = benchmarkReduce(size, numThreads, numBlocks, maxThreads, maxBlocks,
testIterations, multipass, cpuFinalReduction,
cpuFinalThreshold, timer, h_odata, d_idata, d_odata);
BENCHMARK.end_total();
float reduceTime = sdkGetAverageTimerValue(&timer);
//printf("Average time: %f ms\n", reduceTime);
//printf("Bandwidth: %f GB/s\n\n", (size * sizeof(int)) / (reduceTime * 1.0e6));
// compute reference solution
float cpu_result = reduceCPU<float>(h_idata, size);
// printf("GPU result = %0.12f\n", gpu_result);
// printf("CPU result = %0.12f\n", cpu_result);
double threshold = 1e-8 * size;
double diff = abs((double)gpu_result - (double)cpu_result);
bTestResult = (diff < threshold);
if(diff > threshold)
BENCHMARK.fail();
// cleanup
sdkDeleteTimer(&timer);
free(h_idata);
free(h_odata);
hipFree(d_idata);
hipFree(d_odata);
}
return bTestResult;
}
|
c1e55a2d235da683e9204b1dcaea158042bc6824.cu
|
#include<curd_lib_host.h>
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
Parallel reduction
This sample shows how to perform a reduction operation on an array of values
to produce a single value in a single kernel (as opposed to two or more
kernel calls as shown in the "reduction" CUDA Sample). Single-pass
reduction requires global atomic instructions (Compute Capability 1.1 or
later) and the __threadfence() intrinsic (CUDA 2.2 or later).
Reductions are a very common computation in parallel algorithms. Any time
an array of values needs to be reduced to a single value using a binary
associative operator, a reduction can be used. Example applications include
statistics computations such as mean and standard deviation, and image
processing applications such as finding the total luminance of an
image.
This code performs sum reductions, but any associative operator such as
min() or max() could also be used.
It assumes the input size is a power of 2.
COMMAND LINE ARGUMENTS
"--shmoo": Test performance for 1 to 32M elements with each of the 7 different kernels
"--n=<N>": Specify the number of elements to reduce (default 1048576)
"--threads=<N>": Specify the number of threads per block (default 128)
"--maxblocks=<N>": Specify the maximum number of thread blocks to launch (kernel 6 only, default 64)
"--cpufinal": Read back the per-block results and do final sum of block sums on CPU (default false)
"--cputhresh=<N>": The threshold of number of blocks sums below which to perform a CPU final reduction (default 1)
"--multipass": Use a multipass reduction instead of a single-pass reduction
*/
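// Illustrative sketch of the single-pass "last block" pattern this sample relies on
// (the real kernels live in threadFenceReduction_kernel.cuh; the names and the
// one-element-per-thread loading below are simplified assumptions, kept commented
// out so they cannot collide with the definitions in that header).
/*
__device__ unsigned int sketchRetirementCount = 0;
__global__ void reduceSinglePassSketch(const float *in, float *blockSums, float *out, int n)
{
extern __shared__ float s[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + tid;
s[tid] = (i < n) ? in[i] : 0.0f;
__syncthreads();
// standard shared-memory tree reduction within the block
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) s[tid] += s[tid + stride];
__syncthreads();
}
if (tid == 0) blockSums[blockIdx.x] = s[0];
// make this block's partial sum visible to the whole device before signalling
__threadfence();
__shared__ bool amLast;
if (tid == 0) amLast = (atomicInc(&sketchRetirementCount, gridDim.x) == gridDim.x - 1);
__syncthreads();
if (amLast && tid == 0) {
// the last block to retire adds up all partial sums
float total = 0.0f;
for (int b = 0; b < gridDim.x; b++) total += blockSums[b];
*out = total;
sketchRetirementCount = 0;
}
}
*/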
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <helper_functions.h>
#include <helper_cuda.h>
#include "../../benchmark.h"
#define VERSION_MAJOR (CUDART_VERSION/1000)
#define VERSION_MINOR (CUDART_VERSION%100)/10
const char *sSDKsample = "threadFenceReduction";
#if CUDART_VERSION >= 2020
#include "threadFenceReduction_kernel.cuh"
#else
#pragma comment(user, "CUDA 2.2 is required to build for threadFenceReduction")
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
bool runTest(int argc, char **argv);
extern "C"
{
void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata);
void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata);
}
#if CUDART_VERSION < 2020
void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata)
{
printf("reduce(), compiler not supported, aborting tests\n");
}
void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata)
{
printf("reduceSinglePass(), compiler not supported, aborting tests\n");
}
#endif
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
cudaDeviceProp deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev;
// printf("%s Starting...\n\n", sSDKsample);
/*dev = findCudaDevice(argc, (const char **)argv);
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));*/
// printf("GPU Device supports SM %d.%d compute capability\n\n", deviceProp.major, deviceProp.minor);
bool bTestResult = false;
#if CUDART_VERSION >= 2020
bTestResult = runTest(argc, argv);
#else
print_NVCC_min_spec(sSDKsample, "2.2", "Version 185");
exit(EXIT_SUCCESS);
#endif
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
////////////////////////////////////////////////////////////////////////////////
//! Compute sum reduction on CPU
//! We use Kahan summation for an accurate sum of large arrays.
//! http://en.wikipedia.org/wiki/Kahan_summation_algorithm
//!
//! @param data pointer to input data
//! @param size number of input data elements
////////////////////////////////////////////////////////////////////////////////
template<class T>
T reduceCPU(T *data, int size)
{
T sum = data[0];
T c = (T)0.0;
for (int i = 1; i < size; i++)
{
T y = data[i] - c;
T t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
////////////////////////////////////////////////////////////////////////////////
// Compute the number of threads and blocks to use for the reduction
// We set threads / block to the minimum of maxThreads and n/2.
////////////////////////////////////////////////////////////////////////////////
void getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (n == 1)
{
threads = 1;
blocks = 1;
}
else
{
threads = (n < maxThreads*2) ? nextPow2(n / 2) : maxThreads;
blocks = max(1, n / (threads * 2));
}
blocks = min(maxBlocks, blocks);
}
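// Example: with the defaults size = 1<<20, maxThreads = 128 and maxBlocks = 64 this
// yields threads = 128 and blocks = min(64, 1048576 / 256) = 64.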
////////////////////////////////////////////////////////////////////////////////
// This function performs a reduction of the input data multiple times and
// measures the average reduction time.
////////////////////////////////////////////////////////////////////////////////
float benchmarkReduce(int n,
int numThreads,
int numBlocks,
int maxThreads,
int maxBlocks,
int testIterations,
bool multiPass,
bool cpuFinalReduction,
int cpuFinalThreshold,
StopWatchInterface *timer,
float *h_odata,
float *d_idata,
float *d_odata)
{
float gpu_result = 0;
bool bNeedReadback = true;
cudaError_t error;
for (int i = 0; i < testIterations; ++i)
{
gpu_result = 0;
unsigned int retCnt = 0;
error = setRetirementCount(retCnt);
checkCudaErrors(error);
cudaDeviceSynchronize();
sdkStartTimer(&timer);
if (multiPass)
{
// execute the kernel
BENCHMARK.start_kernel();
reduce(n, numThreads, numBlocks, d_idata, d_odata);
BENCHMARK.end_kernel();
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
if (cpuFinalReduction)
{
// sum partial sums from each block on CPU
// copy result from device to host
error = cudaMemcpy(h_odata, d_odata, numBlocks*sizeof(float), cudaMemcpyDeviceToHost);
checkCudaErrors(error);
for (int i=0; i<numBlocks; i++)
{
gpu_result += h_odata[i];
}
bNeedReadback = false;
}
else
{
// sum partial block sums on GPU
int s=numBlocks;
while (s > cpuFinalThreshold)
{
int threads = 0, blocks = 0;
getNumBlocksAndThreads(s, maxBlocks, maxThreads, blocks, threads);
reduce(s, threads, blocks, d_odata, d_odata);
s = s / (threads*2);
}
if (s > 1)
{
// copy result from device to host
error = cudaMemcpy(h_odata, d_odata, s * sizeof(float), cudaMemcpyDeviceToHost);
checkCudaErrors(error);
for (int i=0; i < s; i++)
{
gpu_result += h_odata[i];
}
bNeedReadback = false;
}
}
}
else
{
getLastCudaError("Kernel execution failed");
// execute the kernel
BENCHMARK.start_kernel();
reduceSinglePass(n, numThreads, numBlocks, d_idata, d_odata);
BENCHMARK.end_kernel();
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
}
cudaDeviceSynchronize();
sdkStopTimer(&timer);
}
if (bNeedReadback)
{
// copy final sum from device to host
error = cudaMemcpy(&gpu_result, d_odata, sizeof(float), cudaMemcpyDeviceToHost);
checkCudaErrors(error);
}
return gpu_result;
}
////////////////////////////////////////////////////////////////////////////////
// This function calls benchmarkReduce multiple times for a range of array sizes
// and prints a report in CSV (comma-separated value) format that can be used for
// generating a "shmoo" plot showing the performance for each kernel variation
// over a wide range of input sizes.
////////////////////////////////////////////////////////////////////////////////
void shmoo(int minN, int maxN, int maxThreads, int maxBlocks)
{
// create random input data on CPU
unsigned int bytes = maxN * sizeof(float);
float *h_idata = (float *) malloc(bytes);
for (int i = 0; i < maxN; i++)
{
// Keep the numbers small so we don't get truncation error in the sum
h_idata[i] = (rand() & 0xFF) / (float)RAND_MAX;
}
int maxNumBlocks = min(65535, maxN / maxThreads);
// allocate mem for the result on host side
float *h_odata = (float *) malloc(maxNumBlocks*sizeof(float));
// allocate device memory and data
float *d_idata = NULL;
float *d_odata = NULL;
checkCudaErrors(cudaMalloc((void **) &d_idata, bytes));
checkCudaErrors(cudaMalloc((void **) &d_odata, maxNumBlocks*sizeof(float)));
// copy data directly to device memory
checkCudaErrors(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_odata, h_idata, maxNumBlocks*sizeof(float), cudaMemcpyHostToDevice));
// warm-up
reduce(maxN, maxThreads, maxNumBlocks, d_idata, d_odata);
int testIterations = 100;
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
// print headers
//printf("N, %d blocks one pass, %d blocks multipass\n", maxBlocks, maxBlocks);
for (int i = minN; i <= maxN; i *= 2)
{
printf("%d, ", i);
for (int multiPass = 0; multiPass <= 1; multiPass++)
{
sdkResetTimer(&timer);
int numBlocks = 0;
int numThreads = 0;
getNumBlocksAndThreads(i, maxBlocks, maxThreads, numBlocks, numThreads);
benchmarkReduce(i, numThreads, numBlocks, maxThreads, maxBlocks,
testIterations, multiPass==1, false, 1, timer, h_odata, d_idata, d_odata);
float reduceTime = sdkGetAverageTimerValue(&timer);
printf("%f%s", reduceTime, multiPass==0 ? ", " : "\n");
}
}
printf("\n");
// cleanup
sdkDeleteTimer(&timer);
free(h_idata);
free(h_odata);
cudaFree(d_idata);
cudaFree(d_odata);
}
////////////////////////////////////////////////////////////////////////////////
// The main function which runs the reduction test.
////////////////////////////////////////////////////////////////////////////////
bool
runTest(int argc, char **argv)
{
int size = 1<<20; // number of elements to reduce
int maxThreads = 128; // number of threads per block
int maxBlocks = 64;
bool cpuFinalReduction = false;
int cpuFinalThreshold = 1;
bool multipass = false;
bool bTestResult = false;
if (checkCmdLineFlag(argc, (const char **) argv, "b"))
{
size = getCmdLineArgumentInt(argc, (const char **)argv, "b");
}
if (checkCmdLineFlag(argc, (const char **) argv, "threads"))
{
maxThreads = getCmdLineArgumentInt(argc, (const char **)argv, "threads");
}
if (checkCmdLineFlag(argc, (const char **) argv, "maxblocks"))
{
maxBlocks = getCmdLineArgumentInt(argc, (const char **)argv, "maxblocks");
}
// printf("%d elements\n", size);
// printf("%d threads (max)\n", maxThreads);
cpuFinalReduction = checkCmdLineFlag(argc, (const char **) argv, "cpufinal");
multipass = checkCmdLineFlag(argc, (const char **) argv, "multipass");
if (checkCmdLineFlag(argc, (const char **) argv, "cputhresh"))
{
cpuFinalThreshold = getCmdLineArgumentInt(argc, (const char **) argv, "cputhresh");
}
bool runShmoo = checkCmdLineFlag(argc, (const char **) argv, "shmoo");
if (runShmoo)
{
shmoo(1, 33554432, maxThreads, maxBlocks);
}
else
{
// create random input data on CPU
unsigned int bytes = size * sizeof(float);
float *h_idata = (float *) malloc(bytes);
for (int i=0; i<size; i++)
{
// Keep the numbers small so we don't get truncation error in the sum
h_idata[i] = (rand() & 0xFF) / (float)RAND_MAX;
}
int numBlocks = 0;
int numThreads = 0;
getNumBlocksAndThreads(size, maxBlocks, maxThreads, numBlocks, numThreads);
if (numBlocks == 1)
{
cpuFinalThreshold = 1;
}
// allocate mem for the result on host side
float *h_odata = (float *) malloc(numBlocks*sizeof(float));
//printf("%d blocks\n", numBlocks);
// allocate device memory and data
float *d_idata = NULL;
float *d_odata = NULL;
BENCHMARK.start_total();
checkCudaErrors(cudaMalloc((void **) &d_idata, bytes));
checkCudaErrors(cudaMalloc((void **) &d_odata, numBlocks*sizeof(float)));
// copy data directly to device memory
checkCudaErrors(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_odata, h_idata, numBlocks*sizeof(float), cudaMemcpyHostToDevice));
// warm-up
reduce(size, numThreads, numBlocks, d_idata, d_odata);
int testIterations = 100;
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
float gpu_result = 0;
gpu_result = benchmarkReduce(size, numThreads, numBlocks, maxThreads, maxBlocks,
testIterations, multipass, cpuFinalReduction,
cpuFinalThreshold, timer, h_odata, d_idata, d_odata);
BENCHMARK.end_total();
float reduceTime = sdkGetAverageTimerValue(&timer);
//printf("Average time: %f ms\n", reduceTime);
//printf("Bandwidth: %f GB/s\n\n", (size * sizeof(int)) / (reduceTime * 1.0e6));
// compute reference solution
float cpu_result = reduceCPU<float>(h_idata, size);
// printf("GPU result = %0.12f\n", gpu_result);
// printf("CPU result = %0.12f\n", cpu_result);
double threshold = 1e-8 * size;
double diff = abs((double)gpu_result - (double)cpu_result);
bTestResult = (diff < threshold);
if(diff > threshold)
BENCHMARK.fail();
// cleanup
sdkDeleteTimer(&timer);
free(h_idata);
free(h_odata);
cudaFree(d_idata);
cudaFree(d_odata);
}
return bTestResult;
}
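// Hedged sketch (illustration only; the real reduceCPU helper used by runTest above is defined
// elsewhere in this sample): a Kahan-compensated sequential sum is one straightforward way to
// build such a CPU reference, keeping the accumulated float error well below the
// 1e-8 * size acceptance threshold checked above. The function name is an assumption.
template <class T>
T reduceCPU_sketch(const T *data, int size)
{
T sum = data[0];
T c = (T)0;
for (int i = 1; i < size; i++)
{
T y = data[i] - c; // apply the running compensation
T t = sum + y;
c = (t - sum) - y; // recover the low-order bits lost when forming t
sum = t;
}
return sum;
}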
|
3284275ba08e19c7faebf9d1c6d0743f226d8caf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: dnlebard
#include "HarmonicAngleForceGPU.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
// SMALL a relatively small number
#define SMALL Scalar(0.001)
/*! \file HarmonicAngleForceGPU.cu
\brief Defines GPU kernel code for calculating the harmonic angle forces. Used by HarmonicAngleForceComputeGPU.
*/
//! Kernel for calculating harmonic angle forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch Pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param d_params Parameters for the angle force
\param box Box dimensions for periodic boundary condition handling
\param alist Angle data to use in calculating the forces
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
*/
extern "C" __global__ void gpu_compute_harmonic_angle_forces_kernel(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar2 *d_params,
BoxDim box,
const group_storage<3> *alist,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_angles = n_angles_list[idx];
// read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet
Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z);
Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet
// initialize the force to 0
Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar fab[3], fcb[3];
// initialize the virial to 0
Scalar virial[6];
for (int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
// loop over all angles
for (int angle_idx = 0; angle_idx < n_angles; angle_idx++)
{
group_storage<3> cur_angle = alist[pitch*angle_idx + idx];
int cur_angle_x_idx = cur_angle.idx[0];
int cur_angle_y_idx = cur_angle.idx[1];
int cur_angle_type = cur_angle.idx[2];
int cur_angle_abc = apos_list[pitch*angle_idx + idx];
// get the a-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 x_postype = d_pos[cur_angle_x_idx];
Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 y_postype = d_pos[cur_angle_y_idx];
Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z);
if (cur_angle_abc == 0)
{
a_pos = idx_pos;
b_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 1)
{
b_pos = idx_pos;
a_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 2)
{
c_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
}
// calculate dr for a-b,c-b,and a-c
Scalar3 dab = a_pos - b_pos;
Scalar3 dcb = c_pos - b_pos;
Scalar3 dac = a_pos - c_pos;
// apply periodic boundary conditions
dab = box.minImage(dab);
dcb = box.minImage(dcb);
dac = box.minImage(dac);
// get the angle parameters (MEM TRANSFER: 8 bytes)
Scalar2 params = __ldg(d_params + cur_angle_type);
Scalar K = params.x;
Scalar t_0 = params.y;
Scalar rsqab = dot(dab, dab);
Scalar rab = sqrtf(rsqab);
Scalar rsqcb = dot(dcb, dcb);
Scalar rcb = sqrtf(rsqcb);
Scalar c_abbc = dot(dab, dcb);
c_abbc /= rab*rcb;
if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0);
if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0);
Scalar s_abbc = sqrtf(Scalar(1.0) - c_abbc*c_abbc);
if (s_abbc < SMALL) s_abbc = SMALL;
s_abbc = Scalar(1.0)/s_abbc;
// actually calculate the force
Scalar dth = fast::acos(c_abbc) - t_0;
Scalar tk = K*dth;
Scalar a = -Scalar(1.0) * tk * s_abbc;
Scalar a11 = a*c_abbc/rsqab;
Scalar a12 = -a / (rab*rcb);
Scalar a22 = a*c_abbc / rsqcb;
fab[0] = a11*dab.x + a12*dcb.x;
fab[1] = a11*dab.y + a12*dcb.y;
fab[2] = a11*dab.z + a12*dcb.z;
fcb[0] = a22*dcb.x + a12*dab.x;
fcb[1] = a22*dcb.y + a12*dab.y;
fcb[2] = a22*dcb.z + a12*dab.z;
// compute 1/3 of the energy, 1/3 for each atom in the angle
Scalar angle_eng = tk*dth*Scalar(Scalar(1.0)/Scalar(6.0));
// upper triangular version of virial tensor
Scalar angle_virial[6];
angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]);
angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]);
angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]);
angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]);
angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]);
angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]);
if (cur_angle_abc == 0)
{
force_idx.x += fab[0];
force_idx.y += fab[1];
force_idx.z += fab[2];
}
if (cur_angle_abc == 1)
{
force_idx.x -= fab[0] + fcb[0];
force_idx.y -= fab[1] + fcb[1];
force_idx.z -= fab[2] + fcb[2];
}
if (cur_angle_abc == 2)
{
force_idx.x += fcb[0];
force_idx.y += fcb[1];
force_idx.z += fcb[2];
}
force_idx.w += angle_eng;
for (int i = 0; i < 6; i++)
virial[i] += angle_virial[i];
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
for (int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = virial[i];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param atable List of angles stored on the GPU
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
\param d_params K and t_0 params packed as Scalar2 variables
\param n_angle_types Number of angle types in d_params
\param block_size Block size to use when performing calculations
\returns Any error code resulting from the kernel launch
\note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize()
\a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant
and the y component contains t_0 the equilibrium angle.
*/
hipError_t gpu_compute_harmonic_angle_forces(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const group_storage<3> *atable,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list,
Scalar2 *d_params,
unsigned int n_angle_types,
int block_size)
{
assert(d_params);
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_compute_harmonic_angle_forces_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// setup the grid to run the kernel
dim3 grid( N / run_block_size + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_compute_harmonic_angle_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, d_params, box,
atable, apos_list, pitch, n_angles_list);
return hipSuccess;
}
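// Hedged illustration (not part of HOOMD-blue): one way a host could pack the per-angle-type
// parameter array documented above -- one Scalar2 per type with the spring constant K in .x and
// the equilibrium angle t_0 in .y -- before handing d_params to
// gpu_compute_harmonic_angle_forces. The helper name, the K/t_0 input arrays and the raw
// hipMemcpy call are assumptions for illustration only.
static void pack_harmonic_angle_params_sketch(const Scalar *K,
const Scalar *t_0,
unsigned int n_angle_types,
Scalar2 *d_params)
{
Scalar2 *h_params = new Scalar2[n_angle_types];
for (unsigned int type = 0; type < n_angle_types; type++)
h_params[type] = make_scalar2(K[type], t_0[type]); // x = K, y = t_0
hipMemcpy(d_params, h_params, n_angle_types * sizeof(Scalar2), hipMemcpyHostToDevice);
delete[] h_params;
}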
|
3284275ba08e19c7faebf9d1c6d0743f226d8caf.cu
|
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: dnlebard
#include "HarmonicAngleForceGPU.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
// SMALL a relatively small number
#define SMALL Scalar(0.001)
/*! \file HarmonicAngleForceGPU.cu
\brief Defines GPU kernel code for calculating the harmonic angle forces. Used by HarmonicAngleForceComputeGPU.
*/
//! Kernel for calculating harmonic angle forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch Pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param d_params Parameters for the angle force
\param box Box dimensions for periodic boundary condition handling
\param alist Angle data to use in calculating the forces
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
*/
extern "C" __global__ void gpu_compute_harmonic_angle_forces_kernel(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar2 *d_params,
BoxDim box,
const group_storage<3> *alist,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_angles = n_angles_list[idx];
// read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet
Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z);
Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet
// initialize the force to 0
Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar fab[3], fcb[3];
// initialize the virial to 0
Scalar virial[6];
for (int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
// loop over all angles
for (int angle_idx = 0; angle_idx < n_angles; angle_idx++)
{
group_storage<3> cur_angle = alist[pitch*angle_idx + idx];
int cur_angle_x_idx = cur_angle.idx[0];
int cur_angle_y_idx = cur_angle.idx[1];
int cur_angle_type = cur_angle.idx[2];
int cur_angle_abc = apos_list[pitch*angle_idx + idx];
// get the a-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 x_postype = d_pos[cur_angle_x_idx];
Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 y_postype = d_pos[cur_angle_y_idx];
Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z);
if (cur_angle_abc == 0)
{
a_pos = idx_pos;
b_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 1)
{
b_pos = idx_pos;
a_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 2)
{
c_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
}
// calculate dr for a-b,c-b,and a-c
Scalar3 dab = a_pos - b_pos;
Scalar3 dcb = c_pos - b_pos;
Scalar3 dac = a_pos - c_pos;
// apply periodic boundary conditions
dab = box.minImage(dab);
dcb = box.minImage(dcb);
dac = box.minImage(dac);
// get the angle parameters (MEM TRANSFER: 8 bytes)
Scalar2 params = __ldg(d_params + cur_angle_type);
Scalar K = params.x;
Scalar t_0 = params.y;
Scalar rsqab = dot(dab, dab);
Scalar rab = sqrtf(rsqab);
Scalar rsqcb = dot(dcb, dcb);
Scalar rcb = sqrtf(rsqcb);
Scalar c_abbc = dot(dab, dcb);
c_abbc /= rab*rcb;
if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0);
if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0);
Scalar s_abbc = sqrtf(Scalar(1.0) - c_abbc*c_abbc);
if (s_abbc < SMALL) s_abbc = SMALL;
s_abbc = Scalar(1.0)/s_abbc;
// actually calculate the force
Scalar dth = fast::acos(c_abbc) - t_0;
Scalar tk = K*dth;
Scalar a = -Scalar(1.0) * tk * s_abbc;
Scalar a11 = a*c_abbc/rsqab;
Scalar a12 = -a / (rab*rcb);
Scalar a22 = a*c_abbc / rsqcb;
fab[0] = a11*dab.x + a12*dcb.x;
fab[1] = a11*dab.y + a12*dcb.y;
fab[2] = a11*dab.z + a12*dcb.z;
fcb[0] = a22*dcb.x + a12*dab.x;
fcb[1] = a22*dcb.y + a12*dab.y;
fcb[2] = a22*dcb.z + a12*dab.z;
// compute 1/3 of the energy, 1/3 for each atom in the angle
Scalar angle_eng = tk*dth*Scalar(Scalar(1.0)/Scalar(6.0));
// upper triangular version of virial tensor
Scalar angle_virial[6];
angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]);
angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]);
angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]);
angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]);
angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]);
angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]);
if (cur_angle_abc == 0)
{
force_idx.x += fab[0];
force_idx.y += fab[1];
force_idx.z += fab[2];
}
if (cur_angle_abc == 1)
{
force_idx.x -= fab[0] + fcb[0];
force_idx.y -= fab[1] + fcb[1];
force_idx.z -= fab[2] + fcb[2];
}
if (cur_angle_abc == 2)
{
force_idx.x += fcb[0];
force_idx.y += fcb[1];
force_idx.z += fcb[2];
}
force_idx.w += angle_eng;
for (int i = 0; i < 6; i++)
virial[i] += angle_virial[i];
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
for (int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = virial[i];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param atable List of angles stored on the GPU
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
\param d_params K and t_0 params packed as Scalar2 variables
\param n_angle_types Number of angle types in d_params
\param block_size Block size to use when performing calculations
\returns Any error code resulting from the kernel launch
\note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize()
\a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant
and the y component contains t_0 the equilibrium angle.
*/
cudaError_t gpu_compute_harmonic_angle_forces(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const group_storage<3> *atable,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list,
Scalar2 *d_params,
unsigned int n_angle_types,
int block_size)
{
assert(d_params);
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void *)gpu_compute_harmonic_angle_forces_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// setup the grid to run the kernel
dim3 grid( N / run_block_size + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
gpu_compute_harmonic_angle_forces_kernel<<< grid, threads>>>(d_force, d_virial, virial_pitch, N, d_pos, d_params, box,
atable, apos_list, pitch, n_angles_list);
return cudaSuccess;
}
|
e18ddf15e7fe88c55e98a524b9e471b6f5735136.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <algorithm>
#include <stdio.h>
#include "roi_extractor.h"
#include "amir_cuda_util/cuda_util.h"
namespace amirstan
{
namespace plugin
{
using namespace amirstan::cuda;
const int kMAX_FEATMAP_SIZE=10;
struct FeatData{
const void* data[kMAX_FEATMAP_SIZE];
int batch_size;
int channels;
int h[kMAX_FEATMAP_SIZE];
int w[kMAX_FEATMAP_SIZE];
float spatial_scale[kMAX_FEATMAP_SIZE];
int num_featmap;
};
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data,
const int height, const int width,
scalar_t y, scalar_t x) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (scalar_t)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (scalar_t)x_low;
} else {
x_high = x_low + 1;
}
scalar_t ly = y - y_low;
scalar_t lx = x - x_low;
scalar_t hy = 1. - ly;
scalar_t hx = 1. - lx;
// do bilinear interpolation
scalar_t lt = bottom_data[y_low * width + x_low];
scalar_t rt = bottom_data[y_low * width + x_high];
scalar_t lb = bottom_data[y_high * width + x_low];
scalar_t rb = bottom_data[y_high * width + x_high];
scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb);
return val;
}
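// Worked example for the interpolation weights above (a sketch): for y = 2.25, x = 3.5 we get
// y_low = 2, ly = 0.25, hy = 0.75 and x_low = 3, lx = 0.5, hx = 0.5, so
// w1 = 0.375, w2 = 0.375, w3 = 0.125, w4 = 0.125. The weights always sum to 1, making val a
// convex combination of the four neighbouring texels.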
template <typename scalar_t>
__device__ scalar_t roi_align_single(const scalar_t *bottom_data,
const int roi_batch_ind,
const scalar_t roi_start_w,
const scalar_t roi_start_h,
const scalar_t roi_end_w,
const scalar_t roi_end_h,
const scalar_t spatial_scale,
const int pw, const int ph, const int c,
const int sample_num, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width) {
// Force malformed ROIs to be 1x1
const scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
const scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
const scalar_t bin_size_h = roi_height / pooled_height;
const scalar_t bin_size_w = roi_width / pooled_width;
const scalar_t *offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
int sample_num_h = (sample_num > 0)
? sample_num
: ceil(roi_height / pooled_height); // e.g., = 2
int sample_num_w =
(sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
scalar_t output_val = 0;
#pragma unroll
for (int iy = 0; iy < sample_num_h; iy++) {
const scalar_t y = roi_start_h + ph * bin_size_h +
(scalar_t)(iy + scalar_t(.5f)) * bin_size_h /
(scalar_t)(sample_num_h);
#pragma unroll
for (int ix = 0; ix < sample_num_w; ix++) {
const scalar_t x = roi_start_w + pw * bin_size_w +
(scalar_t)(ix + scalar_t(.5f)) * bin_size_w /
(scalar_t)(sample_num_w);
scalar_t val = bilinear_interpolate<scalar_t>(offset_bottom_data,
height, width, y, x);
output_val += val;
}
}
output_val /= (sample_num_h * sample_num_w);
return output_val;
}
template<typename scalar_t>
__global__ void roi_extractor_kernel(
scalar_t* output,
const scalar_t *bottom_rois,
FeatData feat_data,
const int sample_num, const float roi_scale_factor, const int finest_scale,
const int pooled_height, const int pooled_width,
int nThreads){
CUDA_KERNEL_LOOP(index, nThreads){
const int channels = feat_data.channels;
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
scalar_t roi_offset_x0 = offset_bottom_rois[1];
scalar_t roi_offset_y0 = offset_bottom_rois[2];
scalar_t roi_offset_x1 = offset_bottom_rois[3];
scalar_t roi_offset_y1 = offset_bottom_rois[4];
const scalar_t scale = sqrtf((roi_offset_y1 - roi_offset_y0 + 1.)*(roi_offset_x1 - roi_offset_x0 + 1.));
const int target_lvls = fminf(feat_data.num_featmap-1, fmaxf(0,floorf(log2f(scale/(scalar_t)(finest_scale)+1e-6))));
if(roi_scale_factor>0.){
const scalar_t roi_off_cx = (roi_offset_x0+roi_offset_x1)*0.5;
const scalar_t roi_off_cy = (roi_offset_y0+roi_offset_y1)*0.5;
const scalar_t roi_off_w = (roi_offset_x1-roi_offset_x0 +1)*roi_scale_factor;
const scalar_t roi_off_h = (roi_offset_y1-roi_offset_y0 +1)*roi_scale_factor;
roi_offset_x0 = roi_off_cx - roi_off_w*0.5 + 0.5;
roi_offset_x1 = roi_off_cx + roi_off_w*0.5 - 0.5;
roi_offset_y0 = roi_off_cy - roi_off_h*0.5 + 0.5;
roi_offset_y1 = roi_off_cy + roi_off_h*0.5 - 0.5;
}
const scalar_t spatial_scale = (scalar_t)feat_data.spatial_scale[target_lvls];
const int height = feat_data.h[target_lvls];
const int width = feat_data.w[target_lvls];
const scalar_t *bottom_data = (scalar_t*)feat_data.data[target_lvls];
const int roi_batch_ind = offset_bottom_rois[0];
const scalar_t roi_start_w = roi_offset_x0 * spatial_scale;
const scalar_t roi_start_h = roi_offset_y0 * spatial_scale;
const scalar_t roi_end_w = (roi_offset_x1 + 1) * spatial_scale;
const scalar_t roi_end_h = (roi_offset_y1 + 1) * spatial_scale;
const scalar_t output_val = roi_align_single<scalar_t>(bottom_data,
roi_batch_ind,
roi_start_w,
roi_start_h,
roi_end_w,
roi_end_h,
spatial_scale,
pw, ph, c,
sample_num, channels,
height, width,
pooled_height, pooled_width);
output[index] = output_val;
}
}
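// Worked example of the level-mapping rule above (a sketch): each RoI is routed to FPN level
// floor(log2(sqrt(roi_w * roi_h) / finest_scale)), clamped to [0, num_featmap - 1]. E.g. with
// finest_scale = 56 (a common detector default, assumed here purely for illustration):
//   a 112 x 112 RoI -> scale = 112, log2(112/56) = 1 -> level 1
//   a 448 x 448 RoI -> scale = 448, log2(448/56) = 3 -> level 3 (clamped if fewer levels exist)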
template<typename T>
void roi_extractor(T* output,
const T* rois,
int num_rois,
const void *const *feats,
int num_feats,
int n,
int c,
int *h,
int *w,
int *strides,
int out_size,
int sample_num,
float roi_scale_factor,
int finest_scale,
hipStream_t stream){
FeatData feat_data;
feat_data.batch_size = n;
feat_data.channels = c;
feat_data.num_featmap = num_feats;
for(int i=0;i< num_feats;++i){
feat_data.data[i] = feats[i];
feat_data.h[i] = h[i];
feat_data.w[i] = w[i];
feat_data.spatial_scale[i] = 1./float(strides[i]);
}
int pooled_height = out_size;
int pooled_width = out_size;
int nThreads = num_rois * c * pooled_height * pooled_width;
hipLaunchKernelGGL(( roi_extractor_kernel<T>), dim3(GET_BLOCKS(nThreads)), dim3(CUDA_NUM_THREADS),0,stream,
output, rois,
feat_data,
sample_num, roi_scale_factor, finest_scale,
pooled_height, pooled_width,
nThreads);
}
template void roi_extractor<float>(float* output,
const float* rois,
int num_rois,
const void *const *feats,
int num_feats,
int n,
int c,
int *h,
int *w,
int *strides,
int out_size,
int sample_num,
float roi_scale_factor,
int finest_scale,
hipStream_t stream);
}
}
|
e18ddf15e7fe88c55e98a524b9e471b6f5735136.cu
|
#include <cmath>
#include <algorithm>
#include <stdio.h>
#include "roi_extractor.h"
#include "amir_cuda_util/cuda_util.h"
namespace amirstan
{
namespace plugin
{
using namespace amirstan::cuda;
const int kMAX_FEATMAP_SIZE=10;
struct FeatData{
const void* data[kMAX_FEATMAP_SIZE];
int batch_size;
int channels;
int h[kMAX_FEATMAP_SIZE];
int w[kMAX_FEATMAP_SIZE];
float spatial_scale[kMAX_FEATMAP_SIZE];
int num_featmap;
};
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data,
const int height, const int width,
scalar_t y, scalar_t x) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (scalar_t)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (scalar_t)x_low;
} else {
x_high = x_low + 1;
}
scalar_t ly = y - y_low;
scalar_t lx = x - x_low;
scalar_t hy = 1. - ly;
scalar_t hx = 1. - lx;
// do bilinear interpolation
scalar_t lt = bottom_data[y_low * width + x_low];
scalar_t rt = bottom_data[y_low * width + x_high];
scalar_t lb = bottom_data[y_high * width + x_low];
scalar_t rb = bottom_data[y_high * width + x_high];
scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb);
return val;
}
template <typename scalar_t>
__device__ scalar_t roi_align_single(const scalar_t *bottom_data,
const int roi_batch_ind,
const scalar_t roi_start_w,
const scalar_t roi_start_h,
const scalar_t roi_end_w,
const scalar_t roi_end_h,
const scalar_t spatial_scale,
const int pw, const int ph, const int c,
const int sample_num, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width) {
// Force malformed ROIs to be 1x1
const scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
const scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
const scalar_t bin_size_h = roi_height / pooled_height;
const scalar_t bin_size_w = roi_width / pooled_width;
const scalar_t *offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
int sample_num_h = (sample_num > 0)
? sample_num
: ceil(roi_height / pooled_height); // e.g., = 2
int sample_num_w =
(sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
scalar_t output_val = 0;
#pragma unroll
for (int iy = 0; iy < sample_num_h; iy++) {
const scalar_t y = roi_start_h + ph * bin_size_h +
(scalar_t)(iy + scalar_t(.5f)) * bin_size_h /
(scalar_t)(sample_num_h);
#pragma unroll
for (int ix = 0; ix < sample_num_w; ix++) {
const scalar_t x = roi_start_w + pw * bin_size_w +
(scalar_t)(ix + scalar_t(.5f)) * bin_size_w /
(scalar_t)(sample_num_w);
scalar_t val = bilinear_interpolate<scalar_t>(offset_bottom_data,
height, width, y, x);
output_val += val;
}
}
output_val /= (sample_num_h * sample_num_w);
return output_val;
}
template<typename scalar_t>
__global__ void roi_extractor_kernel(
scalar_t* output,
const scalar_t *bottom_rois,
FeatData feat_data,
const int sample_num, const float roi_scale_factor, const int finest_scale,
const int pooled_height, const int pooled_width,
int nThreads){
CUDA_KERNEL_LOOP(index, nThreads){
const int channels = feat_data.channels;
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
scalar_t roi_offset_x0 = offset_bottom_rois[1];
scalar_t roi_offset_y0 = offset_bottom_rois[2];
scalar_t roi_offset_x1 = offset_bottom_rois[3];
scalar_t roi_offset_y1 = offset_bottom_rois[4];
const scalar_t scale = sqrtf((roi_offset_y1 - roi_offset_y0 + 1.)*(roi_offset_x1 - roi_offset_x0 + 1.));
const int target_lvls = fminf(feat_data.num_featmap-1, fmaxf(0,floorf(log2f(scale/(scalar_t)(finest_scale)+1e-6))));
if(roi_scale_factor>0.){
const scalar_t roi_off_cx = (roi_offset_x0+roi_offset_x1)*0.5;
const scalar_t roi_off_cy = (roi_offset_y0+roi_offset_y1)*0.5;
const scalar_t roi_off_w = (roi_offset_x1-roi_offset_x0 +1)*roi_scale_factor;
const scalar_t roi_off_h = (roi_offset_y1-roi_offset_y0 +1)*roi_scale_factor;
roi_offset_x0 = roi_off_cx - roi_off_w*0.5 + 0.5;
roi_offset_x1 = roi_off_cx + roi_off_w*0.5 - 0.5;
roi_offset_y0 = roi_off_cy - roi_off_h*0.5 + 0.5;
roi_offset_y1 = roi_off_cy + roi_off_h*0.5 - 0.5;
}
const scalar_t spatial_scale = (scalar_t)feat_data.spatial_scale[target_lvls];
const int height = feat_data.h[target_lvls];
const int width = feat_data.w[target_lvls];
const scalar_t *bottom_data = (scalar_t*)feat_data.data[target_lvls];
const int roi_batch_ind = offset_bottom_rois[0];
const scalar_t roi_start_w = roi_offset_x0 * spatial_scale;
const scalar_t roi_start_h = roi_offset_y0 * spatial_scale;
const scalar_t roi_end_w = (roi_offset_x1 + 1) * spatial_scale;
const scalar_t roi_end_h = (roi_offset_y1 + 1) * spatial_scale;
const scalar_t output_val = roi_align_single<scalar_t>(bottom_data,
roi_batch_ind,
roi_start_w,
roi_start_h,
roi_end_w,
roi_end_h,
spatial_scale,
pw, ph, c,
sample_num, channels,
height, width,
pooled_height, pooled_width);
output[index] = output_val;
}
}
template<typename T>
void roi_extractor(T* output,
const T* rois,
int num_rois,
const void *const *feats,
int num_feats,
int n,
int c,
int *h,
int *w,
int *strides,
int out_size,
int sample_num,
float roi_scale_factor,
int finest_scale,
cudaStream_t stream){
FeatData feat_data;
feat_data.batch_size = n;
feat_data.channels = c;
feat_data.num_featmap = num_feats;
for(int i=0;i< num_feats;++i){
feat_data.data[i] = feats[i];
feat_data.h[i] = h[i];
feat_data.w[i] = w[i];
feat_data.spatial_scale[i] = 1./float(strides[i]);
}
int pooled_height = out_size;
int pooled_width = out_size;
int nThreads = num_rois * c * pooled_height * pooled_width;
roi_extractor_kernel<T><<<GET_BLOCKS(nThreads), CUDA_NUM_THREADS,0,stream>>>(
output, rois,
feat_data,
sample_num, roi_scale_factor, finest_scale,
pooled_height, pooled_width,
nThreads);
}
template void roi_extractor<float>(float* output,
const float* rois,
int num_rois,
const void *const *feats,
int num_feats,
int n,
int c,
int *h,
int *w,
int *strides,
int out_size,
int sample_num,
float roi_scale_factor,
int finest_scale,
cudaStream_t stream);
}
}
|
e6a04a2142051496545074bdc62e4b40aaf69f6b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <ctime>
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
template< typename T >
void swap( T& a, T& b ) {
T t = a;
a = b;
b = t;
}
struct DataBlock
{
int *outbitmap;
int *dev_in;
int *dev_out;
int *bitmap;
};
__global__ void update(int *in, int *out, int dim){
int offset = threadIdx.x + blockIdx.x * blockDim.x;
while (offset < dim * dim) {
// recompute the cell coordinates each iteration so the grid-stride loop stays
// correct when dim * dim exceeds the total number of launched threads
int x = offset % dim;
int y = (int)(offset / dim);
int sum = 0;
for(int i=-1; i < 2; i++) {
for(int j=-1; j < 2; j++) {
int xtemp = (x + i + dim) % dim;
int ytemp = (y + j + dim) % dim;
int offsettemp = xtemp + ytemp * dim;
sum = sum + in[offsettemp];
}
}
sum = sum - in[offset];
if (in[offset] == 1) {
if (sum == 2 || sum ==3) {
out[offset] = 1;
}
else {
out[offset] = 0;
}
}
else {
if( sum == 3) {
out[offset] = 1;
}
else {
out[offset] = 0;
}
}
offset = offset + blockDim.x * gridDim.x;
}
}
int main(int argc, char *argv[]) {
clock_t start;
clock_t gpu_start;
float gpu_comp_time = 0;
float gpu_mem_to_time = 0, gpu_mem_back_time=0;
int dim = atoi(argv[1]);
int nStep = atoi(argv[2]);
// int frequency = atoi(argv[3]);
int size = dim * dim;
int step;
DataBlock data;
data.bitmap=(int *)malloc(size * sizeof(int));
for (int i = 0; i < size; i++) {
data.bitmap[i] = 0;
}
data.bitmap[1]=1;
data.bitmap[dim+2] = 1;
data.bitmap[2 * dim + 0] = 1;
data.bitmap[2 * dim + 1] = 1;
data.bitmap[2 * dim + 2] = 1;
data.outbitmap=(int *)malloc(size * sizeof(int));
int bitmapSize=size * sizeof(int);
start=clock();
gpu_start = clock();
HANDLE_ERROR(hipMalloc( (void **)&(data.dev_in), bitmapSize));
HANDLE_ERROR(hipMalloc( (void **)&(data.dev_out), bitmapSize));
HANDLE_ERROR(hipMemcpy(data.dev_in, data.bitmap, bitmapSize, hipMemcpyHostToDevice));
gpu_mem_to_time = ((float)(clock() - gpu_start)) / CLOCKS_PER_SEC;
// dim3 dimgrid(dim / 16, dim / 16);
// dim3 dimblock(16, 16);
int grid_dim;
int block_dim;
if (dim < 1024) {
grid_dim = dim;
block_dim = dim;
}
else {
grid_dim = 1024;
block_dim = 1024;
}
gpu_start = clock();
for(step = 0; step < nStep; step++ ){
hipLaunchKernelGGL(( update), dim3(grid_dim), dim3(block_dim), 0, 0, data.dev_in, data.dev_out,dim);
swap(data.dev_in,data.dev_out);
}
hipDeviceSynchronize();
gpu_comp_time = ((float)(clock() - gpu_start)) / CLOCKS_PER_SEC;
gpu_start = clock();
HANDLE_ERROR(hipMemcpy(data.outbitmap, data.dev_out, bitmapSize, hipMemcpyDeviceToHost));
gpu_mem_back_time = ((float)(clock() - gpu_start)) / CLOCKS_PER_SEC;
HANDLE_ERROR(hipFree(data.dev_out));
HANDLE_ERROR(hipFree(data.dev_in));
printf("%f %f %f ", gpu_comp_time, gpu_mem_to_time, gpu_mem_back_time);
printf("%f\n", ((float)(clock() - start)) / CLOCKS_PER_SEC);
}
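// Hedged illustration (not part of the original program): a CPU reference for a single cell
// update that mirrors the kernel above -- 8-neighbour sum with periodic wrap, survival on 2 or
// 3 neighbours, birth on exactly 3. Handy for spot-checking data.outbitmap on small grids; the
// function name is an assumption.
static int update_cell_reference(const int *in, int x, int y, int dim)
{
int sum = 0;
for (int i = -1; i < 2; i++)
for (int j = -1; j < 2; j++)
sum += in[((x + i + dim) % dim) + ((y + j + dim) % dim) * dim];
sum -= in[x + y * dim]; // the loop above also counted the cell itself
if (in[x + y * dim] == 1)
return (sum == 2 || sum == 3) ? 1 : 0;
return (sum == 3) ? 1 : 0;
}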
|
e6a04a2142051496545074bdc62e4b40aaf69f6b.cu
|
#include <stdio.h>
#include <ctime>
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
template< typename T >
void swap( T& a, T& b ) {
T t = a;
a = b;
b = t;
}
struct DataBlock
{
int *outbitmap;
int *dev_in;
int *dev_out;
int *bitmap;
};
__global__ void update(int *in, int *out, int dim){
int offset = threadIdx.x + blockIdx.x * blockDim.x;
while (offset < dim * dim) {
// recompute the cell coordinates each iteration so the grid-stride loop stays
// correct when dim * dim exceeds the total number of launched threads
int x = offset % dim;
int y = (int)(offset / dim);
int sum = 0;
for(int i=-1; i < 2; i++) {
for(int j=-1; j < 2; j++) {
int xtemp = (x + i + dim) % dim;
int ytemp = (y + j + dim) % dim;
int offsettemp = xtemp + ytemp * dim;
sum = sum + in[offsettemp];
}
}
sum = sum - in[offset];
if (in[offset] == 1) {
if (sum == 2 || sum ==3) {
out[offset] = 1;
}
else {
out[offset] = 0;
}
}
else {
if( sum == 3) {
out[offset] = 1;
}
else {
out[offset] = 0;
}
}
offset = offset + blockDim.x * gridDim.x;
}
}
int main(int argc, char *argv[]) {
clock_t start;
clock_t gpu_start;
float gpu_comp_time = 0;
float gpu_mem_to_time = 0, gpu_mem_back_time=0;
int dim = atoi(argv[1]);
int nStep = atoi(argv[2]);
// int frequency = atoi(argv[3]);
int size = dim * dim;
int step;
DataBlock data;
data.bitmap=(int *)malloc(size * sizeof(int));
for (int i = 0; i < size; i++) {
data.bitmap[i] = 0;
}
data.bitmap[1]=1;
data.bitmap[dim+2] = 1;
data.bitmap[2 * dim + 0] = 1;
data.bitmap[2 * dim + 1] = 1;
data.bitmap[2 * dim + 2] = 1;
data.outbitmap=(int *)malloc(size * sizeof(int));
int bitmapSize=size * sizeof(int);
start=clock();
gpu_start = clock();
HANDLE_ERROR(cudaMalloc( (void **)&(data.dev_in), bitmapSize));
HANDLE_ERROR(cudaMalloc( (void **)&(data.dev_out), bitmapSize));
HANDLE_ERROR(cudaMemcpy(data.dev_in, data.bitmap, bitmapSize, cudaMemcpyHostToDevice));
gpu_mem_to_time = ((float)(clock() - gpu_start)) / CLOCKS_PER_SEC;
// dim3 dimgrid(dim / 16, dim / 16);
// dim3 dimblock(16, 16);
int grid_dim;
int block_dim;
if (dim < 1024) {
grid_dim = dim;
block_dim = dim;
}
else {
grid_dim = 1024;
block_dim = 1024;
}
gpu_start = clock();
for(step = 0; step < nStep; step++ ){
update<<<grid_dim, block_dim>>>(data.dev_in, data.dev_out,dim);
swap(data.dev_in,data.dev_out);
}
cudaDeviceSynchronize();
gpu_comp_time = ((float)(clock() - gpu_start)) / CLOCKS_PER_SEC;
gpu_start = clock();
HANDLE_ERROR(cudaMemcpy(data.outbitmap, data.dev_out, bitmapSize, cudaMemcpyDeviceToHost));
gpu_mem_back_time = ((float)(clock() - gpu_start)) / CLOCKS_PER_SEC;
HANDLE_ERROR(cudaFree(data.dev_out));
HANDLE_ERROR(cudaFree(data.dev_in));
printf("%f %f %f ", gpu_comp_time, gpu_mem_to_time, gpu_mem_back_time);
printf("%f\n", ((float)(clock() - start)) / CLOCKS_PER_SEC);
}
|
f7b4ac50da2bfbd398991f0a5eca5b21abe3d9cf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
//#include "realm/runtime_impl.h"
//#include "realm/cuda/cuda_module.h"
FFHandler UtilityTasks::init_cuda_task(
const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, HighLevelRuntime *runtime)
{
assert(regions.size() == 0);
assert(task->arglen == sizeof(size_t));
size_t workSpaceSize = *(const size_t*) task->args;
printf("workSpaceSize (%d MB)\n", workSpaceSize / 1024 / 1024);
FFHandler handle;
handle.workSpaceSize = workSpaceSize;
checkCUDA(hipblasCreate(&handle.blas));
checkCUDNN(cudnnCreate(&handle.dnn));
//std::set<Memory> memFB;
//assert(memFB.size() == 1);
//assert(memFB.begin()->kind() == Memory::GPU_FB_MEM);
//Realm::MemoryImpl* memImpl =
// Realm::get_runtime()->get_memory_impl(*memFB.begin());
//Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl;
//off_t offset = memFBImpl->alloc_bytes(workSpaceSize);
//handle.workSpace = memFBImpl->get_direct_ptr(offset, 0);
checkCUDA(hipMalloc(&handle.workSpace, workSpaceSize));
return handle;
}
void UtilityTasks::dummy_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{}
__inline__
int calc_offset(int c, int y, int x, int yscale, int xscale)
{
return (c * yscale * xscale + y * xscale + x);
}
void nearest_neighbor(unsigned char* image,
unsigned char* buffer,
int height, int width,
int orig_height, int orig_width,
float height_scale, float width_scale)
{
// Note buffer is in HWC layout while image is in CHW layout
for (int y = 0; y < height; y++) {
int y0 = ::min(static_cast<int>(roundf(y * height_scale)), orig_height - 1);
for (int x = 0; x < width; x++) {
int x0 = ::min(static_cast<int>(roundf(x * width_scale)), orig_width - 1);
for (int c = 0; c < 3; c++) {
int origOffset = calc_offset(y0, x0, c, orig_width, 3);
int offset = calc_offset(c, y, x, height, width);
image[offset] = buffer[origOffset];
}
}
}
}
/*
regions[0]: image (unsigned char)
regions[1]: label (int)
*/
void UtilityTasks::load_images_task(
const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, HighLevelRuntime *runtime)
{
#ifdef USE_DATA_LOADER
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const AccessorWO<unsigned char, 3> acc_image(regions[0], FID_DATA);
const AccessorWO<int, 1> acc_label(regions[1], FID_DATA);
Rect<3> rect_image;
Rect<1> rect_label;
unsigned char *buffer = (unsigned char*) malloc(3000 * 3000 * 3);
rect_image = runtime->get_index_space_domain(ctx,
task->regions[0].region.get_index_space());
rect_label = runtime->get_index_space_domain(ctx,
task->regions[1].region.get_index_space());
assert(acc_image.accessor.is_dense_arbitrary(rect_image));
assert(acc_label.accessor.is_dense_arbitrary(rect_label));
unsigned char *image_ptr = acc_image.ptr(rect_image.lo);
int *label_ptr = acc_label.ptr(rect_label.lo);
const DataLoadMeta* meta = (DataLoadMeta*) task->local_args;
int height = rect_image.hi[0] - rect_image.lo[0] + 1;
int width = rect_image.hi[1] - rect_image.lo[1] + 1;
int numImages = (rect_image.hi[2] - rect_image.lo[2] + 1) / 3;
assert((rect_image.hi[2] - rect_image.lo[2] + 1) % 3 == 0);
assert(meta->numImages == numImages);
for (int idx = 0; idx < numImages; idx ++) {
label_ptr[idx] = meta->labels[idx];
FILE *file;
if ((file = fopen(meta->files[idx], "rb")) == NULL) {
fprintf(stderr, "cannot open %s\n", meta->files[idx]);
continue;
}
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, file);
jpeg_read_header(&cinfo, TRUE);
jpeg_start_decompress(&cinfo);
if (cinfo.output_components != 3) {
fprintf(stderr, "skip non-RGB file %s\n", meta->files[idx]);
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
fclose(file);
continue;
}
int origHeight = cinfo.output_height;
int origWidth = cinfo.output_width;
int rowStride = origWidth * cinfo.output_components; // scanlines come out at the decoded width, not the target width
JSAMPARRAY array;
array = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, rowStride, 1);
unsigned char *dst = buffer; // write cursor; keep `buffer` pointing at the start for the resize and free below
while (cinfo.output_scanline < cinfo.output_height) {
jpeg_read_scanlines(&cinfo, array, 1); // decode one scanline into the allocated JSAMPARRAY
memcpy(dst, array[0], rowStride * sizeof(JSAMPLE));
dst += rowStride;
}
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
fclose(file);
float heightScale = static_cast<float>(origHeight) / height;
float widthScale = static_cast<float>(origWidth) / width;
nearest_neighbor(image_ptr, buffer, height, width,
origHeight, origWidth, heightScale, widthScale);
image_ptr += 3 * height * width;
}
free(buffer);
#endif
}
__global__
void apply_normalize(float *tensor_ptr, const unsigned char *rgb_ptr,
size_t size, size_t hxw)
{
const float mean[3] = {0.485, 0.456, 0.406};
const float var[3] = {0.229, 0.224, 0.225};
CUDA_KERNEL_LOOP(i, size)
{
// decide the color of the current position by assuming NCHW layout
int c = (i / hxw) % 3;
tensor_ptr[i] = (static_cast<float>(rgb_ptr[i]) / 256 - mean[c]) / var[c];
}
}
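// Hedged illustration (not FlexFlow API): a CPU reference for a single element of the kernel
// above, assuming the same NCHW layout (channel c = (i / (h*w)) % 3), the /256 scaling and the
// ImageNet mean/std constants hard-coded in apply_normalize. The helper name is an assumption.
inline float normalize_pixel_reference(unsigned char rgb, int c)
{
const float mean[3] = {0.485f, 0.456f, 0.406f};
const float var[3] = {0.229f, 0.224f, 0.225f};
return (static_cast<float>(rgb) / 256 - mean[c]) / var[c];
}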
/*
regions[0](O): input_images
regions[1](I): input_rgb
*/
__host__
void UtilityTasks::normalize_images_task(
const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const AccessorWO<float, 3> acc_tensor(regions[0], FID_DATA);
const AccessorRO<unsigned char, 3> acc_rgb(regions[1], FID_DATA);
Rect<3> rect_tensor, rect_rgb;
rect_tensor = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
rect_rgb = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(acc_tensor.accessor.is_dense_arbitrary(rect_tensor));
assert(acc_rgb.accessor.is_dense_arbitrary(rect_rgb));
assert(rect_tensor == rect_rgb);
size_t w = rect_tensor.hi[0] - rect_tensor.lo[0] + 1;
size_t h = rect_tensor.hi[1] - rect_tensor.lo[1] + 1;
float *tensor_ptr = acc_tensor.ptr(rect_tensor.lo);
const unsigned char *rgb_ptr = acc_rgb.ptr(rect_rgb.lo);
hipLaunchKernelGGL(( apply_normalize), dim3(GET_BLOCKS(rect_tensor.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
tensor_ptr, rgb_ptr, rect_tensor.volume(), h * w);
}
__global__
void init_image_kernel(float* ptr, coord_t size)
{
const coord_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
ptr[tid] = 1.0f;
}
}
__global__
void init_label_kernel(int* ptr, coord_t size)
{
const coord_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
ptr[tid] = 1;
}
}
void UtilityTasks::init_images_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const int BLKSIZE = 512;
const AccessorWO<float, 3> acc_image(regions[0], FID_DATA);
Rect<3> rect_image;
rect_image = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
assert(acc_image.accessor.is_dense_arbitrary(rect_image));
float *image_ptr = acc_image.ptr(rect_image.lo);
int num_blocks = (rect_image.volume() + BLKSIZE - 1) / BLKSIZE;
hipLaunchKernelGGL(( init_image_kernel), dim3(num_blocks), dim3(BLKSIZE), 0, 0, image_ptr, rect_image.volume());
}
void UtilityTasks::init_labels_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const int BLKSIZE = 512;
const AccessorWO<int, 1> acc_label(regions[0], FID_DATA);
Rect<1> rect_label;
rect_label = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
assert(acc_label.accessor.is_dense_arbitrary(rect_label));
int *label_ptr = acc_label.ptr(rect_label.lo);
int num_blocks = (rect_label.volume() + BLKSIZE - 1) / BLKSIZE;
hipLaunchKernelGGL(( init_label_kernel), dim3(num_blocks), dim3(BLKSIZE), 0, 0, label_ptr, rect_label.volume());
}
//void FFModel::load_images(int batch_id)
//{
// assert(false);
//}
void FFModel::prefetch()
{
for (size_t i = 0; i < layers.size(); i++)
layers[i]->prefetch(*this);
}
//void FFModel::update()
//{
// for (int i = layers.size() - 1; i >= 0; i--)
// layers[i]->update(*this);
//}
template <typename T>
bool Parameter::set_weights(const FFModel& ff,
const std::vector<int>& dims,
const T* data)
{
//TODO: check data type matches
size_t volume = 1;
// Check dimensions
if (numDim != (int)dims.size())
return false;
for (int i = 0; i < numDim; i++) {
if (adim[numDim-1-i] != dims[i])
return false;
volume = volume * dims[i];
}
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
RegionRequirement req(region, WRITE_ONLY, EXCLUSIVE, region);
req.add_field(FID_DATA);
InlineLauncher launcher(req);
PhysicalRegion region = runtime->map_region(ctx, launcher);
region.wait_until_valid();
switch (numDim) {
case 1:
{
TensorAccessorW<T, 1> acc(region, req, FID_DATA, ctx, runtime, false);
assert(acc.rect.volume() == volume);
memcpy(acc.ptr, data, volume * sizeof(T));
break;
}
case 2:
{
TensorAccessorW<T, 2> acc(region, req, FID_DATA, ctx, runtime, false);
assert(acc.rect.volume() == volume);
memcpy(acc.ptr, data, volume * sizeof(T));
break;
}
case 3:
{
TensorAccessorW<T, 3> acc(region, req, FID_DATA, ctx, runtime, false);
assert(acc.rect.volume() == volume);
memcpy(acc.ptr, data, volume * sizeof(T));
break;
}
case 4:
{
TensorAccessorW<T, 4> acc(region, req, FID_DATA, ctx, runtime, false);
assert(acc.rect.volume() == volume);
memcpy(acc.ptr, data, volume * sizeof(T));
break;
}
default:
// Unsupported dim
assert(false);
}
runtime->unmap_region(ctx, region);
return true;
}
template <typename T>
bool Parameter::get_weights(const FFModel& ff,
T* data)
{
//TODO: check data type matches
size_t volume = 1;
for (int i = 0; i < numDim; i++) {
volume = volume * adim[i];
}
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
RegionRequirement req(region, READ_ONLY, EXCLUSIVE, region);
req.add_field(FID_DATA);
InlineLauncher launcher(req);
PhysicalRegion region = runtime->map_region(ctx, launcher);
region.wait_until_valid();
switch (numDim) {
case 1:
{
TensorAccessorR<T, 1> acc(region, req, FID_DATA, ctx, runtime);
assert(acc.rect.volume() == volume);
memcpy(data, acc.ptr, volume * sizeof(T));
break;
}
case 2:
{
TensorAccessorR<T, 2> acc(region, req, FID_DATA, ctx, runtime);
assert(acc.rect.volume() == volume);
memcpy(data, acc.ptr, volume * sizeof(T));
break;
}
case 3:
{
TensorAccessorR<T, 3> acc(region, req, FID_DATA, ctx, runtime);
assert(acc.rect.volume() == volume);
memcpy(data, acc.ptr, volume * sizeof(T));
break;
}
case 4:
{
TensorAccessorR<T, 4> acc(region, req, FID_DATA, ctx, runtime);
assert(acc.rect.volume() == volume);
memcpy(data, acc.ptr, volume * sizeof(T));
break;
}
default:
// Unsupported dim
assert(false);
}
runtime->unmap_region(ctx, region);
return true;
}
|
f7b4ac50da2bfbd398991f0a5eca5b21abe3d9cf.cu
|
/* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
//#include "realm/runtime_impl.h"
//#include "realm/cuda/cuda_module.h"
FFHandler UtilityTasks::init_cuda_task(
const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, HighLevelRuntime *runtime)
{
assert(regions.size() == 0);
assert(task->arglen == sizeof(size_t));
size_t workSpaceSize = *(const size_t*) task->args;
printf("workSpaceSize (%d MB)\n", workSpaceSize / 1024 / 1024);
FFHandler handle;
handle.workSpaceSize = workSpaceSize;
checkCUDA(cublasCreate(&handle.blas));
checkCUDNN(cudnnCreate(&handle.dnn));
//std::set<Memory> memFB;
//assert(memFB.size() == 1);
//assert(memFB.begin()->kind() == Memory::GPU_FB_MEM);
//Realm::MemoryImpl* memImpl =
// Realm::get_runtime()->get_memory_impl(*memFB.begin());
//Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl;
//off_t offset = memFBImpl->alloc_bytes(workSpaceSize);
//handle.workSpace = memFBImpl->get_direct_ptr(offset, 0);
checkCUDA(cudaMalloc(&handle.workSpace, workSpaceSize));
return handle;
}
void UtilityTasks::dummy_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{}
__inline__
int calc_offset(int c, int y, int x, int yscale, int xscale)
{
return (c * yscale * xscale + y * xscale + x);
}
void nearest_neighbor(unsigned char* image,
unsigned char* buffer,
int height, int width,
int orig_height, int orig_width,
float height_scale, float width_scale)
{
// Note buffer is in HWC layout while image is in CHW layout
for (int y = 0; y < height; y++) {
int y0 = std::min(static_cast<int>(roundf(y * height_scale)), orig_height - 1);
for (int x = 0; x < width; x++) {
int x0 = std::min(static_cast<int>(roundf(x * width_scale)), orig_width - 1);
for (int c = 0; c < 3; c++) {
int origOffset = calc_offset(y0, x0, c, orig_width, 3);
int offset = calc_offset(c, y, x, height, width);
image[offset] = buffer[origOffset];
}
}
}
}
/*
regions[0]: image (unsigned char)
regions[1]: label (int)
*/
void UtilityTasks::load_images_task(
const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, HighLevelRuntime *runtime)
{
#ifdef USE_DATA_LOADER
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const AccessorWO<unsigned char, 3> acc_image(regions[0], FID_DATA);
const AccessorWO<int, 1> acc_label(regions[1], FID_DATA);
Rect<3> rect_image;
Rect<1> rect_label;
unsigned char *buffer = (unsigned char*) malloc(3000 * 3000 * 3);
rect_image = runtime->get_index_space_domain(ctx,
task->regions[0].region.get_index_space());
rect_label = runtime->get_index_space_domain(ctx,
task->regions[1].region.get_index_space());
assert(acc_image.accessor.is_dense_arbitrary(rect_image));
assert(acc_label.accessor.is_dense_arbitrary(rect_label));
unsigned char *image_ptr = acc_image.ptr(rect_image.lo);
int *label_ptr = acc_label.ptr(rect_label.lo);
const DataLoadMeta* meta = (DataLoadMeta*) task->local_args;
int height = rect_image.hi[0] - rect_image.lo[0] + 1;
int width = rect_image.hi[1] - rect_image.lo[1] + 1;
int numImages = (rect_image.hi[2] - rect_image.lo[2] + 1) / 3;
assert((rect_image.hi[2] - rect_image.lo[2] + 1) % 3 == 0);
assert(meta->numImages == numImages);
for (int idx = 0; idx < numImages; idx ++) {
label_ptr[idx] = meta->labels[idx];
FILE *file;
if ((file = fopen(meta->files[idx], "rb")) == NULL) {
fprintf(stderr, "cannot open %s\n", meta->files[idx]);
continue;
}
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, file);
jpeg_read_header(&cinfo, TRUE);
jpeg_start_decompress(&cinfo);
if (cinfo.output_components != 3) {
fprintf(stderr, "skip non-RGB file %s\n", meta->files[idx]);
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
fclose(file);
continue;
}
int origHeight = cinfo.output_height;
int origWidth = cinfo.output_width;
int rowStride = origWidth * cinfo.output_components; // scanlines come out at the decoded width, not the target width
JSAMPARRAY array;
array = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, rowStride, 1);
unsigned char *dst = buffer; // write cursor; keep `buffer` pointing at the start for the resize and free below
while (cinfo.output_scanline < cinfo.output_height) {
jpeg_read_scanlines(&cinfo, array, 1); // decode one scanline into the allocated JSAMPARRAY
memcpy(dst, array[0], rowStride * sizeof(JSAMPLE));
dst += rowStride;
}
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
fclose(file);
float heightScale = static_cast<float>(origHeight) / height;
float widthScale = static_cast<float>(origWidth) / width;
nearest_neighbor(image_ptr, buffer, height, width,
origHeight, origWidth, heightScale, widthScale);
image_ptr += 3 * height * width;
}
free(buffer);
#endif
}
__global__
void apply_normalize(float *tensor_ptr, const unsigned char *rgb_ptr,
size_t size, size_t hxw)
{
const float mean[3] = {0.485, 0.456, 0.406};
const float var[3] = {0.229, 0.224, 0.225};
CUDA_KERNEL_LOOP(i, size)
{
// decide the color of the current position by assuming NCHW layout
int c = (i / hxw) % 3;
tensor_ptr[i] = (static_cast<float>(rgb_ptr[i]) / 256 - mean[c]) / var[c];
}
}
/*
regions[0](O): input_images
regions[1](I): input_rgb
*/
__host__
void UtilityTasks::normalize_images_task(
const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const AccessorWO<float, 3> acc_tensor(regions[0], FID_DATA);
const AccessorRO<unsigned char, 3> acc_rgb(regions[1], FID_DATA);
Rect<3> rect_tensor, rect_rgb;
rect_tensor = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
rect_rgb = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(acc_tensor.accessor.is_dense_arbitrary(rect_tensor));
assert(acc_rgb.accessor.is_dense_arbitrary(rect_rgb));
assert(rect_tensor == rect_rgb);
size_t w = rect_tensor.hi[0] - rect_tensor.lo[0] + 1;
size_t h = rect_tensor.hi[1] - rect_tensor.lo[1] + 1;
float *tensor_ptr = acc_tensor.ptr(rect_tensor.lo);
const unsigned char *rgb_ptr = acc_rgb.ptr(rect_rgb.lo);
apply_normalize<<<GET_BLOCKS(rect_tensor.volume()), CUDA_NUM_THREADS>>>(
tensor_ptr, rgb_ptr, rect_tensor.volume(), h * w);
}
__global__
void init_image_kernel(float* ptr, coord_t size)
{
const coord_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
ptr[tid] = 1.0f;
}
}
__global__
void init_label_kernel(int* ptr, coord_t size)
{
const coord_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
ptr[tid] = 1;
}
}
void UtilityTasks::init_images_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const int BLKSIZE = 512;
const AccessorWO<float, 3> acc_image(regions[0], FID_DATA);
Rect<3> rect_image;
rect_image = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
assert(acc_image.accessor.is_dense_arbitrary(rect_image));
float *image_ptr = acc_image.ptr(rect_image.lo);
int num_blocks = (rect_image.volume() + BLKSIZE - 1) / BLKSIZE;
init_image_kernel<<<num_blocks, BLKSIZE>>>(image_ptr, rect_image.volume());
}
void UtilityTasks::init_labels_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const int BLKSIZE = 512;
const AccessorWO<int, 1> acc_label(regions[0], FID_DATA);
Rect<1> rect_label;
rect_label = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
assert(acc_label.accessor.is_dense_arbitrary(rect_label));
int *label_ptr = acc_label.ptr(rect_label.lo);
int num_blocks = (rect_label.volume() + BLKSIZE - 1) / BLKSIZE;
init_label_kernel<<<num_blocks, BLKSIZE>>>(label_ptr, rect_label.volume());
}
//void FFModel::load_images(int batch_id)
//{
// assert(false);
//}
void FFModel::prefetch()
{
for (size_t i = 0; i < layers.size(); i++)
layers[i]->prefetch(*this);
}
//void FFModel::update()
//{
// for (int i = layers.size() - 1; i >= 0; i--)
// layers[i]->update(*this);
//}
template <typename T>
bool Parameter::set_weights(const FFModel& ff,
const std::vector<int>& dims,
const T* data)
{
//TODO: check data type matches
size_t volume = 1;
// Check dimensions
if (numDim != (int)dims.size())
return false;
for (int i = 0; i < numDim; i++) {
if (adim[numDim-1-i] != dims[i])
return false;
volume = volume * dims[i];
}
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
RegionRequirement req(region, WRITE_ONLY, EXCLUSIVE, region);
req.add_field(FID_DATA);
InlineLauncher launcher(req);
PhysicalRegion region = runtime->map_region(ctx, launcher);
region.wait_until_valid();
switch (numDim) {
case 1:
{
TensorAccessorW<T, 1> acc(region, req, FID_DATA, ctx, runtime, false);
assert(acc.rect.volume() == volume);
memcpy(acc.ptr, data, volume * sizeof(T));
break;
}
case 2:
{
TensorAccessorW<T, 2> acc(region, req, FID_DATA, ctx, runtime, false);
assert(acc.rect.volume() == volume);
memcpy(acc.ptr, data, volume * sizeof(T));
break;
}
case 3:
{
TensorAccessorW<T, 3> acc(region, req, FID_DATA, ctx, runtime, false);
assert(acc.rect.volume() == volume);
memcpy(acc.ptr, data, volume * sizeof(T));
break;
}
case 4:
{
TensorAccessorW<T, 4> acc(region, req, FID_DATA, ctx, runtime, false);
assert(acc.rect.volume() == volume);
memcpy(acc.ptr, data, volume * sizeof(T));
break;
}
default:
// Unsupported dim
assert(false);
}
runtime->unmap_region(ctx, region);
return true;
}
template <typename T>
bool Parameter::get_weights(const FFModel& ff,
T* data)
{
//TODO: check data type matches
size_t volume = 1;
for (int i = 0; i < numDim; i++) {
volume = volume * adim[i];
}
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
RegionRequirement req(region, READ_ONLY, EXCLUSIVE, region);
req.add_field(FID_DATA);
InlineLauncher launcher(req);
PhysicalRegion region = runtime->map_region(ctx, launcher);
region.wait_until_valid();
switch (numDim) {
case 1:
{
TensorAccessorR<T, 1> acc(region, req, FID_DATA, ctx, runtime);
assert(acc.rect.volume() == volume);
memcpy(data, acc.ptr, volume * sizeof(T));
break;
}
case 2:
{
TensorAccessorR<T, 2> acc(region, req, FID_DATA, ctx, runtime);
assert(acc.rect.volume() == volume);
memcpy(data, acc.ptr, volume * sizeof(T));
break;
}
case 3:
{
TensorAccessorR<T, 3> acc(region, req, FID_DATA, ctx, runtime);
assert(acc.rect.volume() == volume);
memcpy(data, acc.ptr, volume * sizeof(T));
break;
}
case 4:
{
TensorAccessorR<T, 4> acc(region, req, FID_DATA, ctx, runtime);
assert(acc.rect.volume() == volume);
memcpy(data, acc.ptr, volume * sizeof(T));
break;
}
default:
// Unsupported dim
assert(false);
}
runtime->unmap_region(ctx, region);
return true;
}
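// Illustrative usage sketch of the two accessors above: round-tripping a 2-D weight
// tensor through set_weights()/get_weights(). The FFModel instance "ff", the Parameter
// "p", and the dimension values are assumed for illustration and only need to match the
// parameter's actual shape.
#if 0
std::vector<int> dims = {256, 128}; // outermost dimension first, as expected by set_weights()
std::vector<float> host_weights(256 * 128, 0.01f);
bool ok = p.set_weights<float>(ff, dims, host_weights.data());
assert(ok);
std::vector<float> readback(host_weights.size());
ok = p.get_weights<float>(ff, readback.data());
assert(ok && readback == host_weights);
#endif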
|
ee01c8113fe102c24202dc1bb55309633332d245.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: jglaser
#include "TwoStepNPTMTKGPU.cuh"
#include "hoomd/VectorMath.h"
#include <assert.h>
/*! \file TwoStepNPTMTKGPU.cu
\brief Defines GPU kernel code for NPT integration on the GPU using the Martyna-Tobias-Klein update equations. Used by TwoStepNPTMTKGPU.
*/
//! Kernel to propagate the positions and velocities, first half of NPT update
__global__ void gpu_npt_mtk_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar exp_thermo_fac,
Scalar mat_exp_v_xx,
Scalar mat_exp_v_xy,
Scalar mat_exp_v_xz,
Scalar mat_exp_v_yy,
Scalar mat_exp_v_yz,
Scalar mat_exp_v_zz,
Scalar mat_exp_r_xx,
Scalar mat_exp_r_xy,
Scalar mat_exp_r_xz,
Scalar mat_exp_r_yy,
Scalar mat_exp_r_yz,
Scalar mat_exp_r_zz,
Scalar mat_exp_r_int_xx,
Scalar mat_exp_r_int_xy,
Scalar mat_exp_r_int_xz,
Scalar mat_exp_r_int_yy,
Scalar mat_exp_r_int_yz,
Scalar mat_exp_r_int_zz,
Scalar deltaT,
bool rescale_all)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
// initialize eigenvectors
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// fetch particle position
Scalar4 pos = d_pos[idx];
// fetch particle velocity and acceleration
Scalar4 vel = d_vel[idx];
Scalar3 v = make_scalar3(vel.x, vel.y, vel.z);
Scalar3 accel = d_accel[idx];
Scalar3 r = make_scalar3(pos.x, pos.y, pos.z);
// advance velocity
v += deltaT/Scalar(2.0) * accel;
// propagate velocity by half a time step and position by the full time step
// by multiplying with upper triangular matrix
v.x = mat_exp_v_xx * v.x + mat_exp_v_xy * v.y + mat_exp_v_xz * v.z;
v.y = mat_exp_v_yy * v.y + mat_exp_v_yz * v.z;
v.z = mat_exp_v_zz * v.z;
// apply thermostat update of velocity
v *= exp_thermo_fac;
if (!rescale_all)
{
// rescale this group of particles
r.x = mat_exp_r_xx * r.x + mat_exp_r_xy * r.y + mat_exp_r_xz * r.z;
r.y = mat_exp_r_yy * r.y + mat_exp_r_yz * r.z;
r.z = mat_exp_r_zz * r.z;
}
r.x += mat_exp_r_int_xx * v.x + mat_exp_r_int_xy * v.y + mat_exp_r_int_xz * v.z;
r.y += mat_exp_r_int_yy * v.y + mat_exp_r_int_yz * v.z;
r.z += mat_exp_r_int_zz * v.z;
// write out the results
d_pos[idx] = make_scalar4(r.x,r.y,r.z,pos.w);
d_vel[idx] = make_scalar4(v.x,v.y,v.z,vel.w);
}
}
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param exp_thermo_fac Update factor for thermostat
\param mat_exp_v Matrix exponential for velocity update
\param mat_exp_r Matrix exponential for position update
\param mat_exp_r_int Integrated matrix exp for position update
\param deltaT Time to advance (for one full step)
\param rescale_all True if all particles in the system should be rescaled at once
This is just a kernel driver for gpu_npt_mtk_step_one_kernel(). See it for more details.
*/
hipError_t gpu_npt_mtk_step_one(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar exp_thermo_fac,
Scalar *mat_exp_v,
Scalar *mat_exp_r,
Scalar *mat_exp_r_int,
Scalar deltaT,
bool rescale_all)
{
// setup the grid to run the kernel
unsigned int block_size = 256;
dim3 grid( (group_size / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_npt_mtk_step_one_kernel), dim3(grid), dim3(threads) , 0, 0, d_pos,
d_vel,
d_accel,
d_group_members,
group_size,
exp_thermo_fac,
mat_exp_v[0],
mat_exp_v[1],
mat_exp_v[2],
mat_exp_v[3],
mat_exp_v[4],
mat_exp_v[5],
mat_exp_r[0],
mat_exp_r[1],
mat_exp_r[2],
mat_exp_r[3],
mat_exp_r[4],
mat_exp_r[5],
mat_exp_r_int[0],
mat_exp_r_int[1],
mat_exp_r_int[2],
mat_exp_r_int[3],
mat_exp_r_int[4],
mat_exp_r_int[5],
deltaT,
rescale_all);
return hipSuccess;
}
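// Illustrative helper: the grid above launches (group_size / block_size) + 1 blocks, which
// always rounds up but over-allocates one block whenever group_size is an exact multiple of
// block_size (harmless thanks to the bounds check in the kernel). A ceiling division launches
// exactly as many blocks as needed; the helper name is hypothetical.
static inline unsigned int ceil_div(unsigned int n, unsigned int block)
{
return (n + block - 1) / block; // e.g. ceil_div(512, 256) == 2 rather than 3
}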
/*! \param N number of particles in the system
\param d_pos array of particle positions
\param d_image array of particle images
\param box The new box the particles where the particles now reside
Wrap particle positions for all particles in the box
*/
extern "C" __global__
void gpu_npt_mtk_wrap_kernel(const unsigned int N,
Scalar4 *d_pos,
int3 *d_image,
BoxDim box)
{
// determine which particle this thread works on
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// wrap ALL particles in the box
if (idx < N)
{
// fetch particle position
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
// read in the image flags
int3 image = d_image[idx];
// fix periodic boundary conditions
box.wrap(pos, image);
// write out the results
d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[idx] = image;
}
}
/*! \param N number of particles in the system
\param d_pos array of particle positions
\param d_image array of particle images
\param box The new box the particles where the particles now reside
This is just a kernel driver for gpu_npt_mtk_wrap_kernel(). See it for more details.
*/
hipError_t gpu_npt_mtk_wrap(const unsigned int N,
Scalar4 *d_pos,
int3 *d_image,
const BoxDim& box)
{
// setup the grid to run the kernel
unsigned int block_size=256;
dim3 grid( (N / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_npt_mtk_wrap_kernel), dim3(grid), dim3(threads) , 0, 0, N, d_pos, d_image, box);
return hipSuccess;
}
//! Kernel to propagate the positions and velocities, second half of NPT update
__global__ void gpu_npt_mtk_step_two_kernel(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_net_force,
unsigned int *d_group_members,
unsigned int group_size,
Scalar mat_exp_v_xx,
Scalar mat_exp_v_xy,
Scalar mat_exp_v_xz,
Scalar mat_exp_v_yy,
Scalar mat_exp_v_yz,
Scalar mat_exp_v_zz,
Scalar deltaT,
Scalar exp_thermo_fac)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// fetch particle velocity and acceleration
Scalar4 vel = d_vel[idx];
// compute acceleration
Scalar minv = Scalar(1.0)/vel.w;
Scalar4 net_force = d_net_force[idx];
Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z);
accel *= minv;
Scalar3 v = make_scalar3(vel.x, vel.y, vel.z);
// apply thermostat rescaling
v = v*exp_thermo_fac;
// propagate velocity by half a time step by multiplying with an upper triangular matrix
v.x = mat_exp_v_xx * v.x + mat_exp_v_xy * v.y + mat_exp_v_xz * v.z;
v.y = mat_exp_v_yy * v.y + mat_exp_v_yz * v.z;
v.z = mat_exp_v_zz * v.z;
// advance velocity
v += deltaT/Scalar(2.0) * accel;
// write out velocity
d_vel[idx] = make_scalar4(v.x, v.y, v.z, vel.w);
// since we calculate the acceleration, we need to write it for the next step
d_accel[idx] = accel;
}
}
/*! \param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param mat_exp_v Matrix exponential for velocity update
\param d_net_force Net force on each particle
\param deltaT Time to move forward in one whole step
This is just a kernel driver for gpu_npt_mtk_step_two_kernel(). See it for more details.
*/
hipError_t gpu_npt_mtk_step_two(Scalar4 *d_vel,
Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
Scalar* mat_exp_v,
Scalar deltaT,
Scalar exp_thermo_fac)
{
// setup the grid to run the kernel
unsigned int block_size=256;
dim3 grid( (group_size / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_npt_mtk_step_two_kernel), dim3(grid), dim3(threads) , 0, 0, d_vel,
d_accel,
d_net_force,
d_group_members,
group_size,
mat_exp_v[0],
mat_exp_v[1],
mat_exp_v[2],
mat_exp_v[3],
mat_exp_v[4],
mat_exp_v[5],
deltaT,
exp_thermo_fac);
return hipSuccess;
}
__global__ void gpu_npt_mtk_rescale_kernel(unsigned int N,
Scalar4 *d_postype,
Scalar mat_exp_r_xx,
Scalar mat_exp_r_xy,
Scalar mat_exp_r_xz,
Scalar mat_exp_r_yy,
Scalar mat_exp_r_yz,
Scalar mat_exp_r_zz)
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= N) return;
// rescale position
Scalar4 postype = d_postype[idx];
Scalar3 r = make_scalar3(postype.x,postype.y,postype.z);
r.x = mat_exp_r_xx * r.x + mat_exp_r_xy * r.y + mat_exp_r_xz * r.z;
r.y = mat_exp_r_yy * r.y + mat_exp_r_yz* r.z;
r.z = mat_exp_r_zz * r.z;
d_postype[idx] = make_scalar4(r.x, r.y, r.z, postype.w);
}
void gpu_npt_mtk_rescale(unsigned int N,
Scalar4 *d_postype,
Scalar mat_exp_r_xx,
Scalar mat_exp_r_xy,
Scalar mat_exp_r_xz,
Scalar mat_exp_r_yy,
Scalar mat_exp_r_yz,
Scalar mat_exp_r_zz)
{
unsigned int block_size = 256;
dim3 grid( (N / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
hipLaunchKernelGGL(( gpu_npt_mtk_rescale_kernel), dim3(grid), dim3(threads), 0, 0, N,
d_postype,
mat_exp_r_xx,
mat_exp_r_xy,
mat_exp_r_xz,
mat_exp_r_yy,
mat_exp_r_yz,
mat_exp_r_zz);
}
|
ee01c8113fe102c24202dc1bb55309633332d245.cu
|
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: jglaser
#include "TwoStepNPTMTKGPU.cuh"
#include "hoomd/VectorMath.h"
#include <assert.h>
/*! \file TwoStepNPTMTKGPU.cu
\brief Defines GPU kernel code for NPT integration on the GPU using the Martyna-Tobias-Klein update equations. Used by TwoStepNPTMTKGPU.
*/
//! Kernel to propagate the positions and velocities, first half of NPT update
__global__ void gpu_npt_mtk_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar exp_thermo_fac,
Scalar mat_exp_v_xx,
Scalar mat_exp_v_xy,
Scalar mat_exp_v_xz,
Scalar mat_exp_v_yy,
Scalar mat_exp_v_yz,
Scalar mat_exp_v_zz,
Scalar mat_exp_r_xx,
Scalar mat_exp_r_xy,
Scalar mat_exp_r_xz,
Scalar mat_exp_r_yy,
Scalar mat_exp_r_yz,
Scalar mat_exp_r_zz,
Scalar mat_exp_r_int_xx,
Scalar mat_exp_r_int_xy,
Scalar mat_exp_r_int_xz,
Scalar mat_exp_r_int_yy,
Scalar mat_exp_r_int_yz,
Scalar mat_exp_r_int_zz,
Scalar deltaT,
bool rescale_all)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
// initialize eigenvectors
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// fetch particle position
Scalar4 pos = d_pos[idx];
// fetch particle velocity and acceleration
Scalar4 vel = d_vel[idx];
Scalar3 v = make_scalar3(vel.x, vel.y, vel.z);
Scalar3 accel = d_accel[idx];
Scalar3 r = make_scalar3(pos.x, pos.y, pos.z);
// advance velocity
v += deltaT/Scalar(2.0) * accel;
// propagate velocity by half a time step and position by the full time step
// by multiplying with upper triangular matrix
v.x = mat_exp_v_xx * v.x + mat_exp_v_xy * v.y + mat_exp_v_xz * v.z;
v.y = mat_exp_v_yy * v.y + mat_exp_v_yz * v.z;
v.z = mat_exp_v_zz * v.z;
// apply thermostat update of velocity
v *= exp_thermo_fac;
if (!rescale_all)
{
// rescale this group of particles
r.x = mat_exp_r_xx * r.x + mat_exp_r_xy * r.y + mat_exp_r_xz * r.z;
r.y = mat_exp_r_yy * r.y + mat_exp_r_yz * r.z;
r.z = mat_exp_r_zz * r.z;
}
r.x += mat_exp_r_int_xx * v.x + mat_exp_r_int_xy * v.y + mat_exp_r_int_xz * v.z;
r.y += mat_exp_r_int_yy * v.y + mat_exp_r_int_yz * v.z;
r.z += mat_exp_r_int_zz * v.z;
// write out the results
d_pos[idx] = make_scalar4(r.x,r.y,r.z,pos.w);
d_vel[idx] = make_scalar4(v.x,v.y,v.z,vel.w);
}
}
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param exp_thermo_fac Update factor for thermostat
\param mat_exp_v Matrix exponential for velocity update
\param mat_exp_r Matrix exponential for position update
\param mat_exp_r_int Integrated matrix exp for position update
\param deltaT Time to advance (for one full step)
\param rescale_all True if all particles in the system should be rescaled at once
This is just a kernel driver for gpu_npt_mtk_step_one_kernel(). See it for more details.
*/
cudaError_t gpu_npt_mtk_step_one(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar exp_thermo_fac,
Scalar *mat_exp_v,
Scalar *mat_exp_r,
Scalar *mat_exp_r_int,
Scalar deltaT,
bool rescale_all)
{
// setup the grid to run the kernel
unsigned int block_size = 256;
dim3 grid( (group_size / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
gpu_npt_mtk_step_one_kernel<<< grid, threads >>>(d_pos,
d_vel,
d_accel,
d_group_members,
group_size,
exp_thermo_fac,
mat_exp_v[0],
mat_exp_v[1],
mat_exp_v[2],
mat_exp_v[3],
mat_exp_v[4],
mat_exp_v[5],
mat_exp_r[0],
mat_exp_r[1],
mat_exp_r[2],
mat_exp_r[3],
mat_exp_r[4],
mat_exp_r[5],
mat_exp_r_int[0],
mat_exp_r_int[1],
mat_exp_r_int[2],
mat_exp_r_int[3],
mat_exp_r_int[4],
mat_exp_r_int[5],
deltaT,
rescale_all);
return cudaSuccess;
}
/*! \param N number of particles in the system
\param d_pos array of particle positions
\param d_image array of particle images
\param box The new box the particles where the particles now reside
Wrap particle positions for all particles in the box
*/
extern "C" __global__
void gpu_npt_mtk_wrap_kernel(const unsigned int N,
Scalar4 *d_pos,
int3 *d_image,
BoxDim box)
{
// determine which particle this thread works on
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// wrap ALL particles in the box
if (idx < N)
{
// fetch particle position
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
// read in the image flags
int3 image = d_image[idx];
// fix periodic boundary conditions
box.wrap(pos, image);
// write out the results
d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[idx] = image;
}
}
/*! \param N number of particles in the system
\param d_pos array of particle positions
\param d_image array of particle images
\param box The new box the particles where the particles now reside
This is just a kernel driver for gpu_npt_mtk_wrap_kernel(). See it for more details.
*/
cudaError_t gpu_npt_mtk_wrap(const unsigned int N,
Scalar4 *d_pos,
int3 *d_image,
const BoxDim& box)
{
// setup the grid to run the kernel
unsigned int block_size=256;
dim3 grid( (N / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
gpu_npt_mtk_wrap_kernel<<< grid, threads >>>(N, d_pos, d_image, box);
return cudaSuccess;
}
//! Kernel to propagate the positions and velocities, second half of NPT update
__global__ void gpu_npt_mtk_step_two_kernel(Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar4 *d_net_force,
unsigned int *d_group_members,
unsigned int group_size,
Scalar mat_exp_v_xx,
Scalar mat_exp_v_xy,
Scalar mat_exp_v_xz,
Scalar mat_exp_v_yy,
Scalar mat_exp_v_yz,
Scalar mat_exp_v_zz,
Scalar deltaT,
Scalar exp_thermo_fac)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// fetch particle velocity and acceleration
Scalar4 vel = d_vel[idx];
// compute acceleration
Scalar minv = Scalar(1.0)/vel.w;
Scalar4 net_force = d_net_force[idx];
Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z);
accel *= minv;
Scalar3 v = make_scalar3(vel.x, vel.y, vel.z);
// apply thermostat rescaling
v = v*exp_thermo_fac;
// propagate velocity by half a time step by multiplying with an upper triangular matrix
v.x = mat_exp_v_xx * v.x + mat_exp_v_xy * v.y + mat_exp_v_xz * v.z;
v.y = mat_exp_v_yy * v.y + mat_exp_v_yz * v.z;
v.z = mat_exp_v_zz * v.z;
// advance velocity
v += deltaT/Scalar(2.0) * accel;
// write out velocity
d_vel[idx] = make_scalar4(v.x, v.y, v.z, vel.w);
// since we calculate the acceleration, we need to write it for the next step
d_accel[idx] = accel;
}
}
/*! \param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param mat_exp_v Matrix exponential for velocity update
\param d_net_force Net force on each particle
\param deltaT Time to move forward in one whole step
This is just a kernel driver for gpu_npt_mtk_step_two_kernel(). See it for more details.
*/
cudaError_t gpu_npt_mtk_step_two(Scalar4 *d_vel,
Scalar3 *d_accel,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
Scalar* mat_exp_v,
Scalar deltaT,
Scalar exp_thermo_fac)
{
// setup the grid to run the kernel
unsigned int block_size=256;
dim3 grid( (group_size / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
gpu_npt_mtk_step_two_kernel<<< grid, threads >>>(d_vel,
d_accel,
d_net_force,
d_group_members,
group_size,
mat_exp_v[0],
mat_exp_v[1],
mat_exp_v[2],
mat_exp_v[3],
mat_exp_v[4],
mat_exp_v[5],
deltaT,
exp_thermo_fac);
return cudaSuccess;
}
__global__ void gpu_npt_mtk_rescale_kernel(unsigned int N,
Scalar4 *d_postype,
Scalar mat_exp_r_xx,
Scalar mat_exp_r_xy,
Scalar mat_exp_r_xz,
Scalar mat_exp_r_yy,
Scalar mat_exp_r_yz,
Scalar mat_exp_r_zz)
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= N) return;
// rescale position
Scalar4 postype = d_postype[idx];
Scalar3 r = make_scalar3(postype.x,postype.y,postype.z);
r.x = mat_exp_r_xx * r.x + mat_exp_r_xy * r.y + mat_exp_r_xz * r.z;
r.y = mat_exp_r_yy * r.y + mat_exp_r_yz* r.z;
r.z = mat_exp_r_zz * r.z;
d_postype[idx] = make_scalar4(r.x, r.y, r.z, postype.w);
}
void gpu_npt_mtk_rescale(unsigned int N,
Scalar4 *d_postype,
Scalar mat_exp_r_xx,
Scalar mat_exp_r_xy,
Scalar mat_exp_r_xz,
Scalar mat_exp_r_yy,
Scalar mat_exp_r_yz,
Scalar mat_exp_r_zz)
{
unsigned int block_size = 256;
dim3 grid( (N / block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
gpu_npt_mtk_rescale_kernel<<<grid, threads>>> (N,
d_postype,
mat_exp_r_xx,
mat_exp_r_xy,
mat_exp_r_xz,
mat_exp_r_yy,
mat_exp_r_yz,
mat_exp_r_zz);
}
|
d77f48871a5305c9c3f827f5195bbe980695a244.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
__global__ void RWR_init3 ( double* __restrict__ devExpVec0,
const double r,
const int V ) {
const int id = Tid + blockIdx.x * blockDim.x;
if (id < V) {
devExpVec0[id] *= r;
}
}
__global__ void RWR_CUDA3 ( int* __restrict__ devNodes,
int* __restrict__ devEdges,
int* __restrict__ devDegrees,
double* __restrict__ devExpVec1,
double* __restrict__ devExpVec2,
double* __restrict__ devExpVec0,
const double r,
const int V,
const int VWARP_SZ) {
const int id = Tid + blockIdx.x * blockDim.x;
const int stride = blockDim.x * gridDim.x / VWARP_SZ;
//const int stride = DIV(V, VWARP_SZ);
const int vid = id / VWARP_SZ;
const int mod = Tid & (VWARP_SZ-1);
for (int k = vid ; k < V ; k+=stride) {
const int start = devNodes[k];
const int end = devNodes[k+1];
double sum = 0;
devExpVec2[k] = 0;
for (int i = start + mod ; i < end ; i+=VWARP_SZ) {
const int e = devEdges[i];
sum += devExpVec1[e]/devDegrees[e];
}
//atomicAdd(devExpVec2 + k, sum);
atomicAdd(devExpVec2 + k, sum * (1-r));
//devExpVec2[id] = sum * (1-r) + devExpVec0[id];
if(mod == 0)
devExpVec2[k] += devExpVec0[k];
//devExpVec2[k] *= (1-r);
}
// int id = Tid + blockIdx.x * blockDim.x;
// if (id < V) {
// double sum = 0;
// for (int i = devNodes[id] ; i < devNodes[id+1] ; ++i) {
// int e = devEdges[i];
// sum += devExpVec1[e]/devDegrees[e];
// }
// devExpVec2[id] = sum * (1-r) + devExpVec0[id];
// }
}
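// Illustrative serial reference of one RWR iteration: the virtual-warp scheme above assigns
// VWARP_SZ consecutive threads to a vertex, each lane visiting every VWARP_SZ-th edge, and
// combines the partial sums with atomicAdd. The loop below computes the same update on the
// host and can be used to validate a single kernel step; the function name is hypothetical.
static void rwr_step_cpu_reference(const int *nodes, const int *edges, const int *degrees,
const double *vec1, const double *vec0, double *vec2,
double r, int V)
{
for (int k = 0; k < V; ++k) {
double sum = 0;
for (int i = nodes[k]; i < nodes[k + 1]; ++i) {
const int e = edges[i];
sum += vec1[e] / degrees[e]; // contribution of neighbour e (split across lanes on the GPU)
}
vec2[k] = sum * (1 - r) + vec0[k]; // same update the kernel accumulates with atomicAdd
}
}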
inline void cudaExpMatrix::cudaRWR_Kernel3(double r, int stop_step) {
init_kernel3();
std::cout << " (Kernel 3)" << '\n';
//int gridDim = min(MAX_CONCURR_TH/BLOCKDIM , DIV(V, BLOCKDIM));
int gridDim = DIV(V, BLOCKDIM);
timer::Timer<timer::DEVICE> TM;
//int mean;
double time = 0;
int size = 4; //a virtual warp of 4 gives the best results
for (int i = 0 ; i < M ; ++i) {
int level = 0;
int offset = i*V;
hipMemcpyAsync((void**) devExpVec0, expMatrix.eMatrix + offset, V * sizeof (double), hipMemcpyHostToDevice);
hipMemcpyAsync((void**) devExpVec1, expMatrix.eMatrix + offset, V * sizeof (double), hipMemcpyHostToDevice);
hipMemsetAsync((void**) devExpVec2, 0, V*sizeof(double));
TM.start();
hipLaunchKernelGGL(( RWR_init3), dim3(gridDim), dim3(BLOCKDIM) , 0, 0, devExpVec0, r, V);
while( level++ < stop_step ) {
hipLaunchKernelGGL(( RWR_CUDA3), dim3(gridDim), dim3(BLOCKDIM) , 0, 0, devNodes, devEdges, devDegrees,
devExpVec1, devExpVec2, devExpVec0,
r, V, size);
//hipDeviceSynchronize();
double* tmp = devExpVec1;
devExpVec1 = devExpVec2;
devExpVec2 = tmp;
}
TM.stop();
time += TM.duration();
hipMemcpyAsync(expMatrix.eMatrix + offset, devExpVec1, V * sizeof(double), hipMemcpyDeviceToHost);
//std::cout << '\n';
//for (int j = 0 ; j < V ; ++j) {
// std::cout << std::setprecision(5) << expMatrix.eMatrix[j + offset] << ',';
//}
}
std::cout <<"time no data copy: " << time << '\n';
}
void cudaExpMatrix::init_kernel3() {
hipMalloc(&devNodes, (V + 1) * sizeof (int));
hipMalloc(&devEdges, E * sizeof (int));
hipMalloc(&devDegrees, V * sizeof (int));
hipMemcpy((void**) devNodes, graph.nodes, (V + 1) * sizeof (int), hipMemcpyHostToDevice);
hipMemcpy((void**) devEdges, graph.edges, E * sizeof (int), hipMemcpyHostToDevice);
hipMemcpy((void**) devDegrees, graph.degree, V * sizeof (int), hipMemcpyHostToDevice);
hipMalloc(&devExpVec0, V * M * sizeof (double));
hipMalloc(&devExpVec1, V * M * sizeof (double));
hipMalloc(&devExpVec2, V * M * sizeof (double));
hipError_t("Graph Allocation");
}
|
d77f48871a5305c9c3f827f5195bbe980695a244.cu
|
#pragma once
__global__ void RWR_init3 ( double* __restrict__ devExpVec0,
const double r,
const int V ) {
const int id = Tid + blockIdx.x * blockDim.x;
if (id < V) {
devExpVec0[id] *= r;
}
}
__global__ void RWR_CUDA3 ( int* __restrict__ devNodes,
int* __restrict__ devEdges,
int* __restrict__ devDegrees,
double* __restrict__ devExpVec1,
double* __restrict__ devExpVec2,
double* __restrict__ devExpVec0,
const double r,
const int V,
const int VWARP_SZ) {
const int id = Tid + blockIdx.x * blockDim.x;
const int stride = blockDim.x * gridDim.x / VWARP_SZ;
//const int stride = DIV(V, VWARP_SZ);
const int vid = id / VWARP_SZ;
const int mod = Tid & (VWARP_SZ-1);
for (int k = vid ; k < V ; k+=stride) {
const int start = devNodes[k];
const int end = devNodes[k+1];
double sum = 0;
devExpVec2[k] = 0;
for (int i = start + mod ; i < end ; i+=VWARP_SZ) {
const int e = devEdges[i];
sum += devExpVec1[e]/devDegrees[e];
}
//atomicAdd(devExpVec2 + k, sum);
atomicAdd(devExpVec2 + k, sum * (1-r));
//devExpVec2[id] = sum * (1-r) + devExpVec0[id];
if(mod == 0)
devExpVec2[k] += devExpVec0[k];
//devExpVec2[k] *= (1-r);
}
// int id = Tid + blockIdx.x * blockDim.x;
// if (id < V) {
// double sum = 0;
// for (int i = devNodes[id] ; i < devNodes[id+1] ; ++i) {
// int e = devEdges[i];
// sum += devExpVec1[e]/devDegrees[e];
// }
// devExpVec2[id] = sum * (1-r) + devExpVec0[id];
// }
}
inline void cudaExpMatrix::cudaRWR_Kernel3(double r, int stop_step) {
init_kernel3();
std::cout << " (Kernel 3)" << '\n';
//int gridDim = min(MAX_CONCURR_TH/BLOCKDIM , DIV(V, BLOCKDIM));
int gridDim = DIV(V, BLOCKDIM);
timer::Timer<timer::DEVICE> TM;
//int mean;
double time = 0;
int size = 4; //a virtual warp of 4 gives the best results
for (int i = 0 ; i < M ; ++i) {
int level = 0;
int offset = i*V;
cudaMemcpyAsync((void**) devExpVec0, expMatrix.eMatrix + offset, V * sizeof (double), cudaMemcpyHostToDevice);
cudaMemcpyAsync((void**) devExpVec1, expMatrix.eMatrix + offset, V * sizeof (double), cudaMemcpyHostToDevice);
cudaMemsetAsync((void**) devExpVec2, 0, V*sizeof(double));
TM.start();
RWR_init3<<<gridDim, BLOCKDIM >>> (devExpVec0, r, V);
while( level++ < stop_step ) {
RWR_CUDA3<<<gridDim, BLOCKDIM >>> (devNodes, devEdges, devDegrees,
devExpVec1, devExpVec2, devExpVec0,
r, V, size);
//cudaDeviceSynchronize();
double* tmp = devExpVec1;
devExpVec1 = devExpVec2;
devExpVec2 = tmp;
}
TM.stop();
time += TM.duration();
cudaMemcpyAsync(expMatrix.eMatrix + offset, devExpVec1, V * sizeof(double), cudaMemcpyDeviceToHost);
//std::cout << '\n';
//for (int j = 0 ; j < V ; ++j) {
// std::cout << std::setprecision(5) << expMatrix.eMatrix[j + offset] << ',';
//}
}
std::cout <<"time no data copy: " << time << '\n';
}
void cudaExpMatrix::init_kernel3() {
cudaMalloc(&devNodes, (V + 1) * sizeof (int));
cudaMalloc(&devEdges, E * sizeof (int));
cudaMalloc(&devDegrees, V * sizeof (int));
cudaMemcpy((void**) devNodes, graph.nodes, (V + 1) * sizeof (int), cudaMemcpyHostToDevice);
cudaMemcpy((void**) devEdges, graph.edges, E * sizeof (int), cudaMemcpyHostToDevice);
cudaMemcpy((void**) devDegrees, graph.degree, V * sizeof (int), cudaMemcpyHostToDevice);
cudaMalloc(&devExpVec0, V * M * sizeof (double));
cudaMalloc(&devExpVec1, V * M * sizeof (double));
cudaMalloc(&devExpVec2, V * M * sizeof (double));
cudaError("Graph Allocation");
}
|
0a6177862cf2d2eec09b832c699adb0e7a7da8d7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixAddKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE*sizeof(float)); // allocate in bytes, not elements
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE*sizeof(float));
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE*sizeof(float));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(matrixAddKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(matrixAddKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(matrixAddKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
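// Illustrative timing variant: kernel launches are asynchronous, so stopping the clock
// right after the launch loop above mostly measures enqueue time. Adding a device
// synchronization before reading the end timestamp measures the kernels themselves;
// the variable names below simply mirror the loop above.
#if 0
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(matrixAddKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, n);
}
hipDeviceSynchronize(); // wait for all 1000 launches to finish before stopping the timer
auto end = steady_clock::now();
#endif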
|
0a6177862cf2d2eec09b832c699adb0e7a7da8d7.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixAddKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE*sizeof(float)); // allocate in bytes, not elements
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE*sizeof(float));
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE*sizeof(float));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixAddKernel<<<gridBlock,threadBlock>>>(A,B,C,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixAddKernel<<<gridBlock,threadBlock>>>(A,B,C,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixAddKernel<<<gridBlock,threadBlock>>>(A,B,C,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
078d1fe0c046346c940e834eefdfe9b88b4235f2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file GPUgenerateblobs.cu
* \author Fang Huang
* \date October 10, 2010
* \brief This file contains all relevant code to reassemble the final image
* from the fitted subregions.
*/
// includes, system
#include "image_operation.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mex.h"
#include "matrix.h"
// Thread block size
#define BSZ 64
#define MEM 70
#define IMSZ 11
#define IMSZBIG 21
#define imMEM 4000
#define NK 128 //number of blocks to run in each kernel
#define pi 3.141592f
#define min(a,b) (((a) < (b)) ? (a) : (b))
#define max(a,b) (((a) > (b)) ? (a) : (b))
void cudasafe( hipError_t err, char* str, int lineNumber);
void CUDAERROR(const char *instr,int lineNumber);
__global__ void kernel_guassiansampleblobs(int,int,int, float*,float*,float*, float*,float*,float*,float*,float*,float*);
__global__ void kernel_guassianintegrateblobs(int,int,int, float*,float*,float*, float*,float*,float*,float*,float*,float*);
void GPUgenerateblobs(int blobn,int xsz,int ysz,float *xx,float *yy, float *nn, float *LAx, float *LAy, float *cov, float *im)
{
/*!
* \brief Setup memory and run the CUDA kernel_guassian methods.
* \param blobn
* \param xsz
* \param ysz
* \param xx
* \param yy
* \param nn
* \param LAx
* \param LAy
* \param cov
* \param im foo?
*/
float *d_xarray,*d_yarray,*d_Narray,*d_xsigma,*d_ysigma,*d_covariance,*d_im,*d_xl,*d_yt;
float *subim,*xl,*yt;
int ii,iii,jj,kk,loc,boxsz;
float sigma;
int BlockSize;
int flag=0;
// define text variable
char text[1024];
//find max sigma
float maxsigma=-1;
for(ii=0;ii<blobn;ii++){
sigma=sqrt(pow(LAx[ii],2)+pow(LAy[ii],2));
maxsigma=max(maxsigma,sigma);
}
boxsz=(int) round(float (4*maxsigma+1));
boxsz=min(boxsz,20);// max box size is 20, won't work for psfsigma>6
//boxsz=20;
BlockSize=(int) min(ceil((float) 15000/4/boxsz/boxsz),64);
int memblobsnum=(int)ceil((float)blobn/BlockSize)+128;
hipMalloc((void**)&d_xarray, memblobsnum*BlockSize*sizeof(float));
hipMemset(d_xarray, 0, memblobsnum*BlockSize*sizeof(float));
hipMemcpy(d_xarray, xx, blobn*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)&d_yarray, memblobsnum*BlockSize*sizeof(float));
hipMemset(d_yarray, 0, memblobsnum*BlockSize*sizeof(float));
hipMemcpy(d_yarray, yy,blobn*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)&d_Narray, memblobsnum*BlockSize*sizeof(float));
hipMemset(d_Narray, 0, memblobsnum*BlockSize*sizeof(float));
hipMemcpy(d_Narray, nn,blobn*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)&d_xsigma, memblobsnum*BlockSize*sizeof(float));
hipMemset(d_xsigma, 0, memblobsnum*BlockSize*sizeof(float));
hipMemcpy(d_xsigma, LAx,blobn*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)&d_ysigma, memblobsnum*BlockSize*sizeof(float));
hipMemset(d_ysigma, 0, memblobsnum*BlockSize*sizeof(float));
hipMemcpy(d_ysigma, LAy,blobn*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)&d_covariance, memblobsnum*BlockSize*sizeof(float));
hipMemset(d_covariance, 0, memblobsnum*BlockSize*sizeof(float));
hipMemcpy(d_covariance, cov,blobn*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)&d_im, boxsz*boxsz*memblobsnum*BlockSize*sizeof(float));
hipMemset(d_im, 0, boxsz*boxsz*memblobsnum*BlockSize*sizeof(float));
hipMalloc((void**)&d_xl, memblobsnum*BlockSize*sizeof(float));
hipMemset(d_xl, 0, memblobsnum*BlockSize*sizeof(float));
hipMalloc((void**)&d_yt, memblobsnum*BlockSize*sizeof(float));
hipMemset(d_yt, 0, memblobsnum*BlockSize*sizeof(float));
//only run NK blocks in each kernel
int numK=(int)ceil((float)blobn/BlockSize/NK);
for (int ii=0;ii<numK;ii++) {
int blockx = (int) min(ceil(((float)(((float)blobn)/BlockSize)-ii*NK)), NK);
blockx = max(blockx,1);
int threadx= BlockSize;
dim3 dimBlock(threadx);
dim3 dimGrid(blockx);
//printf("threadx: %d,blockx: %d\n", threadx, blockx);
switch (flag)
{
case 0:
hipLaunchKernelGGL(( kernel_guassiansampleblobs), dim3(dimGrid), dim3(dimBlock), 0, 0, ii,BlockSize,boxsz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt);
CUDAERROR(text,__LINE__);
break;//15x15 images, 64 per block
case 1:
hipLaunchKernelGGL(( kernel_guassianintegrateblobs), dim3(dimGrid), dim3(dimBlock), 0, 0, ii,BlockSize,boxsz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt);
CUDAERROR(text,__LINE__);
break;//15x15 images, 64 per block
}
// to make the loop work, we have to operate on d_data
// this trick works to outsmart the compiler!
hipMemcpy(d_xarray, xx, 1*sizeof(float), hipMemcpyHostToDevice);
//printf("Fitting subimages: %d/%d is DONE...\n", (ii+1), numK);
//CUDAERRROR("kernel");
mexEvalString("pause(0.001)");
}
subim= (float * )malloc(blobn*boxsz*boxsz*sizeof(float));
memset (subim,0,blobn*boxsz*boxsz*sizeof(float));
xl=(float * )malloc(blobn*sizeof(float));
memset(xl,0,blobn*sizeof(float));
yt=(float * )malloc(blobn*sizeof(float));
memset(yt,0,blobn*sizeof(float));
//reconstruct images
hipMemcpy(subim, d_im, blobn*boxsz*boxsz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(xl, d_xl, blobn*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(yt, d_yt, blobn*sizeof(float), hipMemcpyDeviceToHost);
for(kk=0;kk<blobn;kk++){
for(jj=0;jj<boxsz;jj++){
for(iii=0;iii<boxsz;iii++){
if ((((int)xl[kk]+iii)<xsz)&&(((int)yt[kk]+jj)<ysz)){
loc=((int)yt[kk]+jj)*xsz+(int)xl[kk]+iii;
if((subim[kk*boxsz*boxsz+jj*boxsz+iii]>0)&&(subim[kk*boxsz*boxsz+jj*boxsz+iii]<100000))
im[loc]+=subim[kk*boxsz*boxsz+jj*boxsz+iii];
}
}
}
}
free(subim);
free(xl);
free(yt);
hipFree(d_xarray);
hipFree(d_yarray);
hipFree(d_Narray);
hipFree(d_xsigma);
hipFree(d_ysigma);
hipFree(d_covariance);
hipFree(d_im);
hipFree(d_xl);
hipFree(d_yt);
}
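// Illustrative helper making the BlockSize heuristic above explicit: each thread renders one
// blob into boxsz*boxsz floats of the shared buffer s_im (imMEM = 4000 floats), so the number
// of blobs per block is capped by a budget of 15000/4 = 3750 floats and by 64 threads. The
// helper name is hypothetical.
static int blobs_per_block(int boxsz)
{
const int shared_float_budget = 15000 / 4; // floats of s_im the heuristic is willing to use
int per_block = (int)ceil((float)shared_float_budget / (boxsz * boxsz));
return min(per_block, 64); // never more than 64 blobs (threads) per block
}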
//kernel_guassiansampleblobs<<<dimGrid, dimBlock>>>(ii,blockx,BlockSize,sz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt); //15x15 images, 64 per block
__global__ void kernel_guassiansampleblobs(int iiK,int BlockSize, int sz, float *d_xarray,float *d_yarray,
float *d_Narray, float *d_xsigma,float *d_ysigma,float *d_covariance,
float *d_im,float *d_xl,float *d_yt )
{
/*!
* \brief Render each Gaussian blob by sampling its 2-D Gaussian at every pixel of the blob's box.
* \param iiK
* \param BlockSize
* \param sz
* \param d_xarray
* \param d_yarray
* \param d_Narray
* \param d_xsigma
* \param d_ysigma
* \param d_covariance
* \param d_im
* \param d_xl
* \param d_yt
*/
int tx = threadIdx.x; //matrix number index
int bx = blockIdx.x;
float x,y,xsigma,ysigma,covariance,N;
int xl;
int yt;
int ii,jj,pixelx,pixely;
//float model;//
__shared__ float s_im[imMEM];
bx=bx+iiK*NK;
//import datas from device to shared memory
x=d_xarray[bx*BlockSize+tx];
y=d_yarray[bx*BlockSize+tx];
N=d_Narray[bx*BlockSize+tx];
xsigma=d_xsigma[bx*BlockSize+tx];
ysigma=d_ysigma[bx*BlockSize+tx];
covariance=d_covariance[bx*BlockSize+tx]/xsigma/ysigma;
xl=round(x)-round(float(sz/2-1) );
xl=max(xl,0);
yt=round(y)-round(float (sz/2-1));
yt=max(yt,0);
for (ii=0;ii<sz;ii++)
{
for(jj=0;jj<sz;jj++) {
// generate model for pixel ii jj
pixelx=ii;
pixely=jj;
s_im[tx*sz*sz+jj*sz+ii]=N/(2*pi*xsigma*ysigma*sqrt(1-pow(covariance,2)))*exp(-1/(2*(1-pow(covariance,2)))*(pow(x-xl-pixelx,2)/pow(xsigma,2)+pow(y-yt-pixely,2)/pow(ysigma,2)-2*covariance*(x-xl-pixelx)*(y-yt-pixely)/(xsigma*ysigma)));
}
}
for (ii=0;ii<sz;ii++){
for(jj=0;jj<sz;jj++)
{
d_im[bx*BlockSize*sz*sz+tx*sz*sz+jj*sz+ii]=s_im[tx*sz*sz+jj*sz+ii];
d_xl[bx*BlockSize+tx]=xl;
d_yt[bx*BlockSize+tx]=yt;
}
}
return;
}
__global__ void kernel_guassianintegrateblobs(int iiK,int BlockSize, int sz, float *d_xarray,float *d_yarray,
float *d_Narray, float *d_xsigma,float *d_ysigma,float *d_covariance,
float *d_im,float *d_xl,float *d_yt )
{
/*!
* \brief Render each Gaussian blob by integrating its Gaussian over every pixel of the blob's box (error-function model).
* \param iiK
* \param BlockSize
* \param sz
* \param d_xarray
* \param d_yarray
* \param d_Narray
* \param d_xsigma
* \param d_ysigma
* \param d_covariance
* \param d_im
* \param d_xl
* \param d_yt
*/
int tx = threadIdx.x; //matrix number index
int bx = blockIdx.x;
float x,y,xsigma,ysigma,covariance;
//float covariance
float N;
int xl;
int yt;
int ii,jj,pixelx,pixely;
//float model;//
__shared__ float s_im[imMEM];
bx=bx+iiK*NK;
//import datas from device to shared memory
x=d_xarray[bx*BlockSize+tx];
y=d_yarray[bx*BlockSize+tx];
N=d_Narray[bx*BlockSize+tx];
xsigma=d_xsigma[bx*BlockSize+tx];
ysigma=d_ysigma[bx*BlockSize+tx];
covariance=d_covariance[bx*BlockSize+tx];
xl=round(x)-round(float (sz/2-1));
xl=max(xl,0);
yt=round(y)-round(float (sz/2-1));
yt=max(yt,0);
for (ii=0;ii<sz;ii++) {
for(jj=0;jj<sz;jj++) {
// generate model for pixel ii jj
pixelx=ii;
pixely=jj;
s_im[tx*sz*sz+jj*sz+ii]=N/4*(erf((x-xl-pixelx-0.5)/sqrt(2*pow(xsigma,2)))-erf((x-xl-pixelx+0.5)/sqrt(2*pow(xsigma,2))))*(erf((y-yt-pixely-0.5)/sqrt(2*pow(ysigma,2)))-erf((y-yt-pixely+0.5)/sqrt(2*pow(ysigma,2)))); //exp(-1/(2*(1-pow(covariance,2)))*(pow(x-xl-pixelx,2)/pow(xsigma,2)+pow(y-yt-pixely,2)/pow(ysigma,2)-2*covariance*(x-xl-pixelx)*(y-yt-pixely)/(xsigma*ysigma)));
}
}
for (ii=0;ii<sz;ii++) {
for(jj=0;jj<sz;jj++)
{
d_im[bx*BlockSize*sz*sz+tx*sz*sz+jj*sz+ii]=s_im[tx*sz*sz+jj*sz+ii];
d_xl[bx*BlockSize+tx]=xl;
d_yt[bx*BlockSize+tx]=yt;
}
}
return;
}
|
078d1fe0c046346c940e834eefdfe9b88b4235f2.cu
|
/*! \file GPUgenerateblobs.cu
* \author Fang Huang
* \date October 10, 2010
* \brief This file contains all relevant code to reassemble the final image
* from the fitted subregions.
*/
// includes, system
#include "image_operation.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mex.h"
#include "matrix.h"
// Thread block size
#define BSZ 64
#define MEM 70
#define IMSZ 11
#define IMSZBIG 21
#define imMEM 4000
#define NK 128 //number of blocks to run in each kernel
#define pi 3.141592f
#define min(a,b) (((a) < (b)) ? (a) : (b))
#define max(a,b) (((a) > (b)) ? (a) : (b))
void cudasafe( cudaError_t err, char* str, int lineNumber);
void CUDAERROR(const char *instr,int lineNumber);
__global__ void kernel_guassiansampleblobs(int,int,int, float*,float*,float*, float*,float*,float*,float*,float*,float*);
__global__ void kernel_guassianintegrateblobs(int,int,int, float*,float*,float*, float*,float*,float*,float*,float*,float*);
void GPUgenerateblobs(int blobn,int xsz,int ysz,float *xx,float *yy, float *nn, float *LAx, float *LAy, float *cov, float *im)
{
/*!
* \brief Setup memory and run the CUDA kernel_guassian methods.
* \param blobn
* \param xsz
* \param ysz
* \param xx
* \param yy
* \param nn
* \param LAx
* \param LAy
* \param cov
* \param im Output image buffer into which the rendered blobs are accumulated.
*/
float *d_xarray,*d_yarray,*d_Narray,*d_xsigma,*d_ysigma,*d_covariance,*d_im,*d_xl,*d_yt;
float *subim,*xl,*yt;
int ii,iii,jj,kk,loc,boxsz;
float sigma;
int BlockSize;
int flag=0;
// define text variable
char text[1024];
//find max sigma
float maxsigma=-1;
for(ii=0;ii<blobn;ii++){
sigma=sqrt(pow(LAx[ii],2)+pow(LAy[ii],2));
maxsigma=max(maxsigma,sigma);
}
boxsz=(int) round(float (4*maxsigma+1));
boxsz=min(boxsz,20);// max box size is 20, won't work for psfsigma>6
//boxsz=20;
BlockSize=(int) min(ceil((float) 15000/4/boxsz/boxsz),64);
int memblobsnum=(int)ceil((float)blobn/BlockSize)+128;
cudaMalloc((void**)&d_xarray, memblobsnum*BlockSize*sizeof(float));
cudaMemset(d_xarray, 0, memblobsnum*BlockSize*sizeof(float));
cudaMemcpy(d_xarray, xx, blobn*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_yarray, memblobsnum*BlockSize*sizeof(float));
cudaMemset(d_yarray, 0, memblobsnum*BlockSize*sizeof(float));
cudaMemcpy(d_yarray, yy,blobn*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_Narray, memblobsnum*BlockSize*sizeof(float));
cudaMemset(d_Narray, 0, memblobsnum*BlockSize*sizeof(float));
cudaMemcpy(d_Narray, nn,blobn*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_xsigma, memblobsnum*BlockSize*sizeof(float));
cudaMemset(d_xsigma, 0, memblobsnum*BlockSize*sizeof(float));
cudaMemcpy(d_xsigma, LAx,blobn*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_ysigma, memblobsnum*BlockSize*sizeof(float));
cudaMemset(d_ysigma, 0, memblobsnum*BlockSize*sizeof(float));
cudaMemcpy(d_ysigma, LAy,blobn*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_covariance, memblobsnum*BlockSize*sizeof(float));
cudaMemset(d_covariance, 0, memblobsnum*BlockSize*sizeof(float));
cudaMemcpy(d_covariance, cov,blobn*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_im, boxsz*boxsz*memblobsnum*BlockSize*sizeof(float));
cudaMemset(d_im, 0, boxsz*boxsz*memblobsnum*BlockSize*sizeof(float));
cudaMalloc((void**)&d_xl, memblobsnum*BlockSize*sizeof(float));
cudaMemset(d_xl, 0, memblobsnum*BlockSize*sizeof(float));
cudaMalloc((void**)&d_yt, memblobsnum*BlockSize*sizeof(float));
cudaMemset(d_yt, 0, memblobsnum*BlockSize*sizeof(float));
//only run NK blocks in each kernel
int numK=(int)ceil((float)blobn/BlockSize/NK);
for (int ii=0;ii<numK;ii++) {
int blockx = (int) min(ceil(((float)(((float)blobn)/BlockSize)-ii*NK)), NK);
blockx = max(blockx,1);
int threadx= BlockSize;
dim3 dimBlock(threadx);
dim3 dimGrid(blockx);
//printf("threadx: %d,blockx: %d\n", threadx, blockx);
switch (flag)
{
case 0:
kernel_guassiansampleblobs<<<dimGrid, dimBlock>>>(ii,BlockSize,boxsz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt);
CUDAERROR(text,__LINE__);
break;//15x15 images, 64 per block
case 1:
kernel_guassianintegrateblobs<<<dimGrid, dimBlock>>>(ii,BlockSize,boxsz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt);
CUDAERROR(text,__LINE__);
break;//15x15 images, 64 per block
}
// to make the loop work, we have to operate on d_data
// this trick works to outsmart the compiler!
cudaMemcpy(d_xarray, xx, 1*sizeof(float), cudaMemcpyHostToDevice);
//printf("Fitting subimages: %d/%d is DONE...\n", (ii+1), numK);
//CUDAERRROR("kernel");
mexEvalString("pause(0.001)");
}
subim= (float * )malloc(blobn*boxsz*boxsz*sizeof(float));
memset (subim,0,blobn*boxsz*boxsz*sizeof(float));
xl=(float * )malloc(blobn*sizeof(float));
memset(xl,0,blobn*sizeof(float));
yt=(float * )malloc(blobn*sizeof(float));
memset(yt,0,blobn*sizeof(float));
//reconstruct images
cudaMemcpy(subim, d_im, blobn*boxsz*boxsz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(xl, d_xl, blobn*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(yt, d_yt, blobn*sizeof(float), cudaMemcpyDeviceToHost);
for(kk=0;kk<blobn;kk++){
for(jj=0;jj<boxsz;jj++){
for(iii=0;iii<boxsz;iii++){
if ((((int)xl[kk]+iii)<xsz)&&(((int)yt[kk]+jj)<ysz)){
loc=((int)yt[kk]+jj)*xsz+(int)xl[kk]+iii;
if((subim[kk*boxsz*boxsz+jj*boxsz+iii]>0)&&(subim[kk*boxsz*boxsz+jj*boxsz+iii]<100000))
im[loc]+=subim[kk*boxsz*boxsz+jj*boxsz+iii];
}
}
}
}
free(subim);
free(xl);
free(yt);
cudaFree(d_xarray);
cudaFree(d_yarray);
cudaFree(d_Narray);
cudaFree(d_xsigma);
cudaFree(d_ysigma);
cudaFree(d_covariance);
cudaFree(d_im);
cudaFree(d_xl);
cudaFree(d_yt);
}
//kernel_guassiansampleblobs<<<dimGrid, dimBlock>>>(ii,blockx,BlockSize,sz, d_xarray,d_yarray,d_Narray, d_xsigma,d_ysigma,d_covariance,d_im,d_xl,d_yt); //15x15 images, 64 per block
__global__ void kernel_guassiansampleblobs(int iiK,int BlockSize, int sz, float *d_xarray,float *d_yarray,
float *d_Narray, float *d_xsigma,float *d_ysigma,float *d_covariance,
float *d_im,float *d_xl,float *d_yt )
{
/*!
* \brief Render each Gaussian blob by sampling its 2-D Gaussian at every pixel of the blob's box.
* \param iiK
* \param BlockSize
* \param sz
* \param d_xarray
* \param d_yarray
* \param d_Narray
* \param d_xsigma
* \param d_ysigma
* \param d_covariance
* \param d_im
* \param d_xl
* \param d_yt
*/
int tx = threadIdx.x; //matrix number index
int bx = blockIdx.x;
float x,y,xsigma,ysigma,covariance,N;
int xl;
int yt;
int ii,jj,pixelx,pixely;
//float model;//
__shared__ float s_im[imMEM];
bx=bx+iiK*NK;
//import datas from device to shared memory
x=d_xarray[bx*BlockSize+tx];
y=d_yarray[bx*BlockSize+tx];
N=d_Narray[bx*BlockSize+tx];
xsigma=d_xsigma[bx*BlockSize+tx];
ysigma=d_ysigma[bx*BlockSize+tx];
covariance=d_covariance[bx*BlockSize+tx]/xsigma/ysigma;
xl=round(x)-round(float(sz/2-1) );
xl=max(xl,0);
yt=round(y)-round(float (sz/2-1));
yt=max(yt,0);
for (ii=0;ii<sz;ii++)
{
for(jj=0;jj<sz;jj++) {
// generate model for pixel ii jj
pixelx=ii;
pixely=jj;
s_im[tx*sz*sz+jj*sz+ii]=N/(2*pi*xsigma*ysigma*sqrt(1-pow(covariance,2)))*exp(-1/(2*(1-pow(covariance,2)))*(pow(x-xl-pixelx,2)/pow(xsigma,2)+pow(y-yt-pixely,2)/pow(ysigma,2)-2*covariance*(x-xl-pixelx)*(y-yt-pixely)/(xsigma*ysigma)));
}
}
for (ii=0;ii<sz;ii++){
for(jj=0;jj<sz;jj++)
{
d_im[bx*BlockSize*sz*sz+tx*sz*sz+jj*sz+ii]=s_im[tx*sz*sz+jj*sz+ii];
d_xl[bx*BlockSize+tx]=xl;
d_yt[bx*BlockSize+tx]=yt;
}
}
return;
}
__global__ void kernel_guassianintegrateblobs(int iiK,int BlockSize, int sz, float *d_xarray,float *d_yarray,
float *d_Narray, float *d_xsigma,float *d_ysigma,float *d_covariance,
float *d_im,float *d_xl,float *d_yt )
{
/*!
* \brief Render each Gaussian blob by integrating its Gaussian over every pixel of the blob's box (error-function model).
* \param iiK
* \param BlockSize
* \param sz
* \param d_xarray
* \param d_yarray
* \param d_Narray
* \param d_xsigma
* \param d_ysigma
* \param d_covariance
* \param d_im
* \param d_xl
* \param d_yt
*/
int tx = threadIdx.x; //matrix number index
int bx = blockIdx.x;
float x,y,xsigma,ysigma,covariance;
//float covariance
float N;
int xl;
int yt;
int ii,jj,pixelx,pixely;
//float model;//
__shared__ float s_im[imMEM];
bx=bx+iiK*NK;
//import datas from device to shared memory
x=d_xarray[bx*BlockSize+tx];
y=d_yarray[bx*BlockSize+tx];
N=d_Narray[bx*BlockSize+tx];
xsigma=d_xsigma[bx*BlockSize+tx];
ysigma=d_ysigma[bx*BlockSize+tx];
covariance=d_covariance[bx*BlockSize+tx];
xl=round(x)-round(float (sz/2-1));
xl=max(xl,0);
yt=round(y)-round(float (sz/2-1));
yt=max(yt,0);
for (ii=0;ii<sz;ii++) {
for(jj=0;jj<sz;jj++) {
// generate model for pixel ii jj
pixelx=ii;
pixely=jj;
s_im[tx*sz*sz+jj*sz+ii]=N/4*(erf((x-xl-pixelx-0.5)/sqrt(2*pow(xsigma,2)))-erf((x-xl-pixelx+0.5)/sqrt(2*pow(xsigma,2))))*(erf((y-yt-pixely-0.5)/sqrt(2*pow(ysigma,2)))-erf((y-yt-pixely+0.5)/sqrt(2*pow(ysigma,2)))); //exp(-1/(2*(1-pow(covariance,2)))*(pow(x-xl-pixelx,2)/pow(xsigma,2)+pow(y-yt-pixely,2)/pow(ysigma,2)-2*covariance*(x-xl-pixelx)*(y-yt-pixely)/(xsigma*ysigma)));
}
}
for (ii=0;ii<sz;ii++) {
for(jj=0;jj<sz;jj++)
{
d_im[bx*BlockSize*sz*sz+tx*sz*sz+jj*sz+ii]=s_im[tx*sz*sz+jj*sz+ii];
d_xl[bx*BlockSize+tx]=xl;
d_yt[bx*BlockSize+tx]=yt;
}
}
return;
}
|
27085c8047384ab21a2dd83a6162dbafd57c7dec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <cfloat>
#include "private.h"
#define MAX_BLOCK_SIZE 1024
/// The number of dimensions. Constant on every device.
__constant__ uint32_t d_dim;
/// Calculates the gamma distribution of the specified size from two uniform
/// distributions.
/// @param size The number of samples to write.
/// @param v1 in The first array with uniformly distributed values in [0, 1].
/// @param v2 in,out The second array with uniformly distributed values in [0, 1].
/// The output is written to it.
/// @note v1 and v2 must be independent (e.g., not the same), otherwise you will
/// get an invalid result.
__global__ void gamma_cuda(uint32_t size, const float *__restrict__ v1,
float *__restrict__ v2) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size) {
return;
}
v2[index] = -logf(v1[index] * v2[index]);
}
/// Calculates the natural logarithm of the array.
/// @param size The length of the array.
/// @param v in,out The array to read and write.
__global__ void log_cuda(uint32_t size, float *v) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size) {
return;
}
v[index] = logf(v[index]);
}
/// Weighted MinHash kernel. The argument names follow the paper:
/// http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/36928.pdf
/// @param rs Gamma(2,1)-random samples. The length must be the product of
/// number of processed samples (vectors) by the number of dimensions.
/// @param ln_cs Logarithm over the gamma(2,1) distribution. Same length as rs.
/// @param betas Uniformly [0, 1] distributed samples. Same length as rs.
/// @param weights CSR's data.
/// @param cols CSR's indices.
/// @param rows CSR's indptrs.
/// @param plan Execution plan, consists of 2 parts: the first is the offset
/// table and the second is the row indices
/// @param sample_delta How many hashes to process in a single thread. Depends
/// on the shared memory size.
/// @param device_row_offset Shard offset in rows. Specific to every device.
/// @param device_wc_offset Shard offset in weights and cols. Specific to every
/// device.
/// @param hashes The output of size number of vectors x number of hashes for
/// each x 2.
__global__ void weighted_minhash_cuda(
const float *__restrict__ rs, const float *__restrict__ ln_cs,
const float *__restrict__ betas, const float *__restrict__ weights,
const uint32_t *__restrict__ cols, const uint32_t *__restrict__ rows,
const int32_t *__restrict__ plan, const int sample_delta,
const uint32_t device_row_offset, const uint32_t device_wc_offset,
uint32_t *__restrict__ hashes) {
const uint32_t thread_index = blockIdx.y * blockDim.y + threadIdx.y;
const uint32_t sample_index = threadIdx.x;
int32_t row_offset = plan[thread_index];
int32_t row_border = plan[thread_index + 1];
if (row_offset == row_border) {
return;
}
const uint32_t sample_offset = sample_index * sample_delta;
const uint32_t samples = blockDim.x * sample_delta;
extern __shared__ float shmem[];
float *volatile lnmins = &shmem[(threadIdx.y * blockDim.x + sample_index) * 3 * sample_delta];
uint2 *volatile dtmins = reinterpret_cast<uint2 *>(lnmins + sample_delta);
int32_t row = -1;
for (uint32_t index = 0, border = 0;; index++) {
if (index >= border) {
for (uint32_t s = 0; s < sample_delta; s++) {
lnmins[s] = FLT_MAX;
}
if (row >= 0) {
for (int s = 0; s < sample_delta; s++) {
auto hash = reinterpret_cast<uint2 *>(hashes +
((row - device_row_offset) * samples + s + sample_offset) * 2);
*hash = dtmins[s];
}
}
if (row_offset >= row_border) {
break;
}
row = plan[row_offset++];
index = rows[row - device_row_offset];
border = rows[row - device_row_offset + 1];
}
const float w = logf(weights[index - device_wc_offset]);
const uint32_t d = cols[index - device_wc_offset];
volatile int64_t ci = static_cast<int64_t>(sample_offset) * d_dim + d;
#pragma unroll 4
for (int s = 0; s < sample_delta; s++, ci += d_dim) {
// We apply the logarithm trick here: log (a / z) = log a - log z
float r = rs[ci];
float beta = betas[ci];
float t = floorf(w / r + beta);
float ln_y = (t - beta) * r;
float ln_a = ln_cs[ci] - ln_y - r;
if (ln_a < lnmins[s]) {
lnmins[s] = ln_a;
dtmins[s] = {d, static_cast<uint32_t>(t)};
}
}
}
}
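// Illustrative serial reference of one consistent-weighted-sampling hash, using the same
// log-domain arithmetic as the kernel above (t = floor(ln w / r + beta), ln y = (t - beta) * r,
// ln a = ln c - ln y - r, keep the argmin). rs, ln_cs and betas are assumed to be indexed per
// dimension for a single hash; the function name is hypothetical.
static void cws_hash_cpu_reference(const float *rs, const float *ln_cs, const float *betas,
const float *weights, const uint32_t *cols, uint32_t nnz,
uint32_t *out_d, uint32_t *out_t) {
float lnmin = FLT_MAX;
for (uint32_t i = 0; i < nnz; i++) {
const float w = logf(weights[i]);
const uint32_t d = cols[i];
const float r = rs[d], beta = betas[d];
const float t = floorf(w / r + beta);
const float ln_y = (t - beta) * r;
const float ln_a = ln_cs[d] - ln_y - r;
if (ln_a < lnmin) {
lnmin = ln_a;
*out_d = d;
*out_t = static_cast<uint32_t>(t);
}
}
}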
extern "C" {
/// Calls gamma_cuda() kernel.
hipError_t gamma_(uint32_t size, const float *v1, float *v2) {
dim3 block(MAX_BLOCK_SIZE, 1, 1);
dim3 grid(size / block.x + 1, 1, 1);
hipLaunchKernelGGL(( gamma_cuda), dim3(grid), dim3(block), 0, 0, size, v1, v2);
RETERR(hipDeviceSynchronize());
return hipSuccess;
}
/// Calls log_cuda() kernel.
hipError_t log_(uint32_t size, float *v) {
dim3 block(MAX_BLOCK_SIZE, 1, 1);
dim3 grid(size / block.x + 1, 1, 1);
hipLaunchKernelGGL(( log_cuda), dim3(grid), dim3(block), 0, 0, size, v);
RETERR(hipDeviceSynchronize());
return hipSuccess;
}
/// Copies the number of dimensions (size of each sample) to a symbol on each
/// device.
MHCUDAResult setup_weighted_minhash(
uint32_t dim, const std::vector<int> &devs, int verbosity) {
FOR_EACH_DEV(
CUCH(hipMemcpyToSymbol(d_dim, &dim, sizeof(dim)),
mhcudaMemoryCopyError);
);
return mhcudaSuccess;
}
/// Calls the corresponding kernel.
MHCUDAResult weighted_minhash(
const udevptrs<float> &rs, const udevptrs<float> &ln_cs,
const udevptrs<float> &betas, const udevptrs<float> &weights,
const udevptrs<uint32_t> &cols, const udevptrs<uint32_t> &rows,
int samples, const std::vector<int> &sample_deltas,
const udevptrs<int32_t> &plan, const std::vector<uint32_t> &split,
const uint32_t *original_rows, const std::vector<uint32_t> &grid_sizes,
const std::vector<int> &devs, int verbosity, udevptrs<uint32_t> *hashes) {
FOR_EACH_DEVI(
int sample_delta = sample_deltas[devi];
int spt = samples / sample_delta;
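    // Launch geometry: spt threads along x each compute sample_delta hashes,
    // so spt * sample_delta == samples hashes per row; MINHASH_BLOCK_SIZE / spt
    // plan slots are processed per block along y.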
assert(MINHASH_BLOCK_SIZE % spt == 0);
dim3 block(spt, MINHASH_BLOCK_SIZE / spt, 1);
dim3 grid(1, grid_sizes[devi], 1);
int shmem = 3 * sizeof(float) * MINHASH_BLOCK_SIZE * sample_delta;
uint32_t row_offset = (devi > 0)? split[devi - 1] : 0;
DEBUG("dev #%d: <<<%d, [%d, %d], %d>>>(%u, %u)\n",
devs[devi], grid.x, block.x, block.y, shmem,
static_cast<unsigned>(row_offset),
static_cast<unsigned>(original_rows[row_offset]));
hipLaunchKernelGGL(( weighted_minhash_cuda), dim3(grid), dim3(block), shmem, 0,
rs[devi].get(), ln_cs[devi].get(), betas[devi].get(),
weights[devi].get(), cols[devi].get(), rows[devi].get(),
plan[devi].get(), sample_delta, row_offset, original_rows[row_offset],
(*hashes)[devi].get());
);
return mhcudaSuccess;
}
} // extern "C"
|
27085c8047384ab21a2dd83a6162dbafd57c7dec.cu
|
#include <cassert>
#include <cfloat>
#include "private.h"
#define MAX_BLOCK_SIZE 1024
/// The number of dimensions. Constant on every device.
__constant__ uint32_t d_dim;
/// Calculates the gamma distribution of the specified size from two uniform
/// distributions.
/// @param size The number of samples to write.
/// @param[in] v1 The first array with uniformly distributed values in [0, 1].
/// @param[in,out] v2 The second array with uniformly distributed values in
/// [0, 1]. The output is written to it.
/// @note v1 and v2 must be independent (in particular, not the same array),
/// otherwise the result is invalid.
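/// @note The identity used below: for independent u1, u2 ~ U(0, 1), -log(u1)
/// and -log(u2) are Exp(1) samples, so their sum -log(u1 * u2) follows the
/// Gamma(2, 1) distribution.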
__global__ void gamma_cuda(uint32_t size, const float *__restrict__ v1,
float *__restrict__ v2) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size) {
return;
}
v2[index] = -logf(v1[index] * v2[index]);
}
/// Calculates the natural logarithm of the array.
/// @param size The length of the array.
/// @param[in,out] v The array to read and write.
__global__ void log_cuda(uint32_t size, float *v) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size) {
return;
}
v[index] = logf(v[index]);
}
/// Weighted MinHash kernel. The argument names follow the paper:
/// http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/36928.pdf
/// @param rs Gamma(2, 1)-distributed random samples. The length must be the
/// number of hashes per vector times the number of dimensions.
/// @param ln_cs Natural logarithms of Gamma(2, 1)-distributed samples
/// (ln c in the referenced paper). Same length as rs.
/// @param betas Uniformly [0, 1] distributed samples. Same length as rs.
/// @param weights CSR's data.
/// @param cols CSR's indices.
/// @param rows CSR's indptrs.
/// @param plan Execution plan; it consists of two parts: the first is the
/// offset table and the second is the row indices.
/// @param sample_delta How many hashes to process in a single thread. Depends
/// on the shared memory size.
/// @param device_row_offset Shard offset in rows. Specific to every device.
/// @param device_wc_offset Shard offset in weights and cols. Specific to every
/// device.
/// @param hashes The output array of size (number of vectors) x (number of
/// hashes per vector) x 2; each hash is a (dimension, t) pair of uint32_t.
__global__ void weighted_minhash_cuda(
const float *__restrict__ rs, const float *__restrict__ ln_cs,
const float *__restrict__ betas, const float *__restrict__ weights,
const uint32_t *__restrict__ cols, const uint32_t *__restrict__ rows,
const int32_t *__restrict__ plan, const int sample_delta,
const uint32_t device_row_offset, const uint32_t device_wc_offset,
uint32_t *__restrict__ hashes) {
const uint32_t thread_index = blockIdx.y * blockDim.y + threadIdx.y;
const uint32_t sample_index = threadIdx.x;
int32_t row_offset = plan[thread_index];
int32_t row_border = plan[thread_index + 1];
if (row_offset == row_border) {
return;
}
const uint32_t sample_offset = sample_index * sample_delta;
const uint32_t samples = blockDim.x * sample_delta;
extern __shared__ float shmem[];
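  // Per-thread scratch in dynamic shared memory: sample_delta floats for the
  // running minima (lnmins) followed by sample_delta uint2 values (two floats
  // each) holding the current best (dimension, t) pairs (dtmins), i.e.
  // 3 * sample_delta floats per thread, matching the host-side shmem size.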
float *volatile lnmins = &shmem[(threadIdx.y * blockDim.x + sample_index) * 3 * sample_delta];
uint2 *volatile dtmins = reinterpret_cast<uint2 *>(lnmins + sample_delta);
int32_t row = -1;
for (uint32_t index = 0, border = 0;; index++) {
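    // index walks the CSR nonzeros of the current row; when it reaches border
    // (and on the very first iteration), the minima accumulated for the
    // previous row, if any, are flushed to the output and the next row is
    // fetched from this thread's slice of the plan.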
if (index >= border) {
for (uint32_t s = 0; s < sample_delta; s++) {
lnmins[s] = FLT_MAX;
}
if (row >= 0) {
for (int s = 0; s < sample_delta; s++) {
auto hash = reinterpret_cast<uint2 *>(hashes +
((row - device_row_offset) * samples + s + sample_offset) * 2);
*hash = dtmins[s];
}
}
if (row_offset >= row_border) {
break;
}
row = plan[row_offset++];
index = rows[row - device_row_offset];
border = rows[row - device_row_offset + 1];
}
const float w = logf(weights[index - device_wc_offset]);
const uint32_t d = cols[index - device_wc_offset];
volatile int64_t ci = static_cast<int64_t>(sample_offset) * d_dim + d;
#pragma unroll 4
for (int s = 0; s < sample_delta; s++, ci += d_dim) {
// We apply the logarithm trick here: log (a / z) = log a - log z
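      // Consistent Weighted Sampling (per the paper referenced in the kernel
      // doc comment), with w = ln(weight), r ~ Gamma(2, 1), beta ~ U(0, 1),
      // ln c = ln_cs[ci]:
      //   t    = floor(w / r + beta)
      //   ln y = r * (t - beta)
      //   ln a = ln c - ln y - r
      // The dimension d with the smallest ln a, together with t, is the hash.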
float r = rs[ci];
float beta = betas[ci];
float t = floorf(w / r + beta);
float ln_y = (t - beta) * r;
float ln_a = ln_cs[ci] - ln_y - r;
if (ln_a < lnmins[s]) {
lnmins[s] = ln_a;
dtmins[s] = {d, static_cast<uint32_t>(t)};
}
}
}
}
extern "C" {
/// Calls gamma_cuda() kernel.
cudaError_t gamma_(uint32_t size, const float *v1, float *v2) {
dim3 block(MAX_BLOCK_SIZE, 1, 1);
dim3 grid(size / block.x + 1, 1, 1);
gamma_cuda<<<grid, block>>>(size, v1, v2);
RETERR(cudaDeviceSynchronize());
return cudaSuccess;
}
/// Calls log_cuda() kernel.
cudaError_t log_(uint32_t size, float *v) {
dim3 block(MAX_BLOCK_SIZE, 1, 1);
dim3 grid(size / block.x + 1, 1, 1);
log_cuda<<<grid, block>>>(size, v);
RETERR(cudaDeviceSynchronize());
return cudaSuccess;
}
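// A minimal host-side sketch (illustration only, not part of this library's
// public API) of how the Weighted MinHash inputs could be prepared with the
// wrappers above. Assume u1..u4 are device arrays of length
// size = (hashes per vector) * dim, pre-filled with independent U(0, 1)
// samples:
//
//   gamma_(size, u1, u2);  // u2 <- -log(u1 * u2) ~ Gamma(2, 1), used as rs
//   gamma_(size, u3, u4);  // u4 <- a second, independent Gamma(2, 1) draw
//   log_(size, u4);        // u4 <- ln of that draw, used as ln_cs
//   // betas can be taken directly from the uniform generator.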
/// Copies the number of dimensions (size of each sample) to a symbol on each
/// device.
MHCUDAResult setup_weighted_minhash(
uint32_t dim, const std::vector<int> &devs, int verbosity) {
FOR_EACH_DEV(
CUCH(cudaMemcpyToSymbol(d_dim, &dim, sizeof(dim)),
mhcudaMemoryCopyError);
);
return mhcudaSuccess;
}
/// Calls the corresponding kernel.
MHCUDAResult weighted_minhash(
const udevptrs<float> &rs, const udevptrs<float> &ln_cs,
const udevptrs<float> &betas, const udevptrs<float> &weights,
const udevptrs<uint32_t> &cols, const udevptrs<uint32_t> &rows,
int samples, const std::vector<int> &sample_deltas,
const udevptrs<int32_t> &plan, const std::vector<uint32_t> &split,
const uint32_t *original_rows, const std::vector<uint32_t> &grid_sizes,
const std::vector<int> &devs, int verbosity, udevptrs<uint32_t> *hashes) {
FOR_EACH_DEVI(
int sample_delta = sample_deltas[devi];
int spt = samples / sample_delta;
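    // Launch geometry: spt threads along x each compute sample_delta hashes,
    // so spt * sample_delta == samples hashes per row; MINHASH_BLOCK_SIZE / spt
    // plan slots are processed per block along y.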
assert(MINHASH_BLOCK_SIZE % spt == 0);
dim3 block(spt, MINHASH_BLOCK_SIZE / spt, 1);
dim3 grid(1, grid_sizes[devi], 1);
int shmem = 3 * sizeof(float) * MINHASH_BLOCK_SIZE * sample_delta;
uint32_t row_offset = (devi > 0)? split[devi - 1] : 0;
DEBUG("dev #%d: <<<%d, [%d, %d], %d>>>(%u, %u)\n",
devs[devi], grid.x, block.x, block.y, shmem,
static_cast<unsigned>(row_offset),
static_cast<unsigned>(original_rows[row_offset]));
weighted_minhash_cuda<<<grid, block, shmem>>>(
rs[devi].get(), ln_cs[devi].get(), betas[devi].get(),
weights[devi].get(), cols[devi].get(), rows[devi].get(),
plan[devi].get(), sample_delta, row_offset, original_rows[row_offset],
(*hashes)[devi].get());
);
return mhcudaSuccess;
}
} // extern "C"
|